qfuxa committed
Commit 00f19b9 · 1 Parent(s): 167e258

core refactoring

setup.py CHANGED
@@ -1,7 +1,7 @@
 from setuptools import setup, find_packages
 setup(
     name="whisperlivekit",
-    version="0.1.7",
+    version="0.1.8",
     description="Real-time, Fully Local Whisper's Speech-to-Text and Speaker Diarization",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
whisperlivekit/__init__.py CHANGED
@@ -1,4 +1,5 @@
-from .core import WhisperLiveKit, parse_args
+from .core import TranscriptionEngine
 from .audio_processor import AudioProcessor
-
-__all__ = ['WhisperLiveKit', 'AudioProcessor', 'parse_args']
+from .web.web_interface import get_web_interface_html
+from .parse_args import parse_args
+__all__ = ['TranscriptionEngine', 'AudioProcessor', 'get_web_interface_html', 'parse_args']
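
The package's public surface is now `TranscriptionEngine`, `AudioProcessor`, `get_web_interface_html`, and `parse_args`. A minimal sketch of how the refactored pieces fit together (the keyword values such as `model="tiny"` are illustrative, not part of this commit):

```python
# Sketch of the refactored public API; argument values are illustrative.
from whisperlivekit import TranscriptionEngine, AudioProcessor, get_web_interface_html

# One shared engine holds configuration and loaded models (singleton, see core.py below).
engine = TranscriptionEngine(model="tiny", diarization=False)

# One AudioProcessor per client connection, reusing the shared engine.
processor = AudioProcessor(transcription_engine=engine)

# The bundled web UI is now served from a module-level helper
# instead of a method on the kit object.
html = get_web_interface_html()
```
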
whisperlivekit/audio_processor.py CHANGED
@@ -8,7 +8,7 @@ import traceback
 from datetime import timedelta
 from whisperlivekit.timed_objects import ASRToken
 from whisperlivekit.whisper_streaming_custom.whisper_online import online_factory
-from whisperlivekit.core import WhisperLiveKit
+from whisperlivekit.core import TranscriptionEngine
 
 # Set up logging once
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -27,10 +27,13 @@ class AudioProcessor:
     Handles audio processing, state management, and result formatting.
     """
 
-    def __init__(self):
+    def __init__(self, **kwargs):
         """Initialize the audio processor with configuration, models, and state."""
 
-        models = WhisperLiveKit()
+        if 'transcription_engine' in kwargs and isinstance(kwargs['transcription_engine'], TranscriptionEngine):
+            models = kwargs['transcription_engine']
+        else:
+            models = TranscriptionEngine(**kwargs)
 
         # Audio processing settings
         self.args = models.args
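
`AudioProcessor` no longer creates the kit itself: it reuses a `TranscriptionEngine` passed as `transcription_engine`, or falls back to `TranscriptionEngine(**kwargs)`. A hedged sketch of the two construction paths (keyword values are illustrative):

```python
# Two construction paths for AudioProcessor after this refactor (sketch; values illustrative).
from whisperlivekit import TranscriptionEngine, AudioProcessor

# 1) Reuse an engine created once at startup, as basic_server.py does per connection.
engine = TranscriptionEngine(model="tiny")
processor_a = AudioProcessor(transcription_engine=engine)

# 2) Pass configuration kwargs directly; AudioProcessor forwards them to
#    TranscriptionEngine(**kwargs), which returns the shared singleton instance.
processor_b = AudioProcessor(model="tiny", diarization=False)
```
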
whisperlivekit/basic_server.py CHANGED
@@ -2,26 +2,24 @@ from contextlib import asynccontextmanager
 from fastapi import FastAPI, WebSocket, WebSocketDisconnect
 from fastapi.responses import HTMLResponse
 from fastapi.middleware.cors import CORSMiddleware
-
-from whisperlivekit import WhisperLiveKit, parse_args
-from whisperlivekit.audio_processor import AudioProcessor
-
+from whisperlivekit import TranscriptionEngine, AudioProcessor, get_web_interface_html, parse_args
 import asyncio
 import logging
-import os, sys
-import argparse
 
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
 logging.getLogger().setLevel(logging.WARNING)
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
 
-kit = None
+args = parse_args()
+transcription_engine = None
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):
-    global kit
-    kit = WhisperLiveKit()
+    global transcription_engine
+    transcription_engine = TranscriptionEngine(
+        **vars(args),
+    )
     yield
 
 app = FastAPI(lifespan=lifespan)
@@ -33,10 +31,9 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-
 @app.get("/")
 async def get():
-    return HTMLResponse(kit.web_interface())
+    return HTMLResponse(get_web_interface_html())
 
 
 async def handle_websocket_results(websocket, results_generator):
@@ -55,8 +52,10 @@ async def handle_websocket_results(websocket, results_generator):
 
 @app.websocket("/asr")
 async def websocket_endpoint(websocket: WebSocket):
-    audio_processor = AudioProcessor()
-
+    global transcription_engine
+    audio_processor = AudioProcessor(
+        transcription_engine=transcription_engine,
+    )
     await websocket.accept()
     logger.info("WebSocket connection opened.")
 
@@ -94,8 +93,6 @@ def main():
     """Entry point for the CLI command."""
     import uvicorn
 
-    args = parse_args()
-
     uvicorn_kwargs = {
         "app": "whisperlivekit.basic_server:app",
         "host":args.host,
@@ -114,7 +111,6 @@ def main():
             "ssl_keyfile": args.ssl_keyfile
         }
 
-
    if ssl_kwargs:
        uvicorn_kwargs = {**uvicorn_kwargs, **ssl_kwargs}
 
whisperlivekit/core.py CHANGED
@@ -2,148 +2,10 @@ try:
     from whisperlivekit.whisper_streaming_custom.whisper_online import backend_factory, warmup_asr
 except ImportError:
     from .whisper_streaming_custom.whisper_online import backend_factory, warmup_asr
-from argparse import Namespace, ArgumentParser
+from argparse import Namespace
 
-def parse_args():
-    parser = ArgumentParser(description="Whisper FastAPI Online Server")
-    parser.add_argument(
-        "--host",
-        type=str,
-        default="localhost",
-        help="The host address to bind the server to.",
-    )
-    parser.add_argument(
-        "--port", type=int, default=8000, help="The port number to bind the server to."
-    )
-    parser.add_argument(
-        "--warmup-file",
-        type=str,
-        default=None,
-        dest="warmup_file",
-        help="""
-        The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast.
-        If not set, uses https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav.
-        If False, no warmup is performed.
-        """,
-    )
-
-    parser.add_argument(
-        "--confidence-validation",
-        action="store_true",
-        help="Accelerates validation of tokens using confidence scores. Transcription will be faster but punctuation might be less accurate.",
-    )
-
-    parser.add_argument(
-        "--diarization",
-        action="store_true",
-        default=False,
-        help="Enable speaker diarization.",
-    )
-
-    parser.add_argument(
-        "--no-transcription",
-        action="store_true",
-        help="Disable transcription to only see live diarization results.",
-    )
-
-    parser.add_argument(
-        "--min-chunk-size",
-        type=float,
-        default=0.5,
-        help="Minimum audio chunk size in seconds. It waits up to this time to do processing. If the processing takes shorter time, it waits, otherwise it processes the whole segment that was received by this time.",
-    )
-
-    parser.add_argument(
-        "--model",
-        type=str,
-        default="tiny",
-        help="Name size of the Whisper model to use (default: tiny). Suggested values: tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large,large-v3-turbo. The model is automatically downloaded from the model hub if not present in model cache dir.",
-    )
-
-    parser.add_argument(
-        "--model_cache_dir",
-        type=str,
-        default=None,
-        help="Overriding the default model cache dir where models downloaded from the hub are saved",
-    )
-    parser.add_argument(
-        "--model_dir",
-        type=str,
-        default=None,
-        help="Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.",
-    )
-    parser.add_argument(
-        "--lan",
-        "--language",
-        type=str,
-        default="auto",
-        help="Source language code, e.g. en,de,cs, or 'auto' for language detection.",
-    )
-    parser.add_argument(
-        "--task",
-        type=str,
-        default="transcribe",
-        choices=["transcribe", "translate"],
-        help="Transcribe or translate.",
-    )
-    parser.add_argument(
-        "--backend",
-        type=str,
-        default="faster-whisper",
-        choices=["faster-whisper", "whisper_timestamped", "mlx-whisper", "openai-api"],
-        help="Load only this backend for Whisper processing.",
-    )
-    parser.add_argument(
-        "--vac",
-        action="store_true",
-        default=False,
-        help="Use VAC = voice activity controller. Recommended. Requires torch.",
-    )
-    parser.add_argument(
-        "--vac-chunk-size", type=float, default=0.04, help="VAC sample size in seconds."
-    )
-
-    parser.add_argument(
-        "--no-vad",
-        action="store_true",
-        help="Disable VAD (voice activity detection).",
-    )
-
-    parser.add_argument(
-        "--buffer_trimming",
-        type=str,
-        default="segment",
-        choices=["sentence", "segment"],
-        help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.',
-    )
-    parser.add_argument(
-        "--buffer_trimming_sec",
-        type=float,
-        default=15,
-        help="Buffer trimming length threshold in seconds. If buffer length is longer, trimming sentence/segment is triggered.",
-    )
-    parser.add_argument(
-        "-l",
-        "--log-level",
-        dest="log_level",
-        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
-        help="Set the log level",
-        default="DEBUG",
-    )
-    parser.add_argument("--ssl-certfile", type=str, help="Path to the SSL certificate file.", default=None)
-    parser.add_argument("--ssl-keyfile", type=str, help="Path to the SSL private key file.", default=None)
-
-
-    args = parser.parse_args()
-
-    args.transcription = not args.no_transcription
-    args.vad = not args.no_vad
-    delattr(args, 'no_transcription')
-    delattr(args, 'no_vad')
-
-    return args
-
-class WhisperLiveKit:
+
+class TranscriptionEngine:
     _instance = None
     _initialized = False
 
@@ -153,14 +15,48 @@ class WhisperLiveKit:
         return cls._instance
 
     def __init__(self, **kwargs):
-        if WhisperLiveKit._initialized:
+        if TranscriptionEngine._initialized:
             return
-
-        default_args = vars(parse_args())
-
-        merged_args = {**default_args, **kwargs}
+
+        defaults = {
+            "host": "localhost",
+            "port": 8000,
+            "warmup_file": None,
+            "confidence_validation": False,
+            "diarization": False,
+            "min_chunk_size": 0.5,
+            "model": "tiny",
+            "model_cache_dir": None,
+            "model_dir": None,
+            "lan": "auto",
+            "task": "transcribe",
+            "backend": "faster-whisper",
+            "vac": False,
+            "vac_chunk_size": 0.04,
+            "buffer_trimming": "segment",
+            "buffer_trimming_sec": 15,
+            "log_level": "DEBUG",
+            "ssl_certfile": None,
+            "ssl_keyfile": None,
+            "transcription": True,
+            "vad": True,
+        }
+
+        config_dict = {**defaults, **kwargs}
+
+        if 'no_transcription' in kwargs:
+            config_dict['transcription'] = not kwargs['no_transcription']
+        if 'no_vad' in kwargs:
+            config_dict['vad'] = not kwargs['no_vad']
 
-        self.args = Namespace(**merged_args)
+        config_dict.pop('no_transcription', None)
+        config_dict.pop('no_vad', None)
+
+        if 'language' in kwargs:
+            config_dict['lan'] = kwargs['language']
+        config_dict.pop('language', None)
+
+        self.args = Namespace(**config_dict)
 
         self.asr = None
         self.tokenizer = None
@@ -174,11 +70,4 @@
             from whisperlivekit.diarization.diarization_online import DiartDiarization
             self.diarization = DiartDiarization()
 
-        WhisperLiveKit._initialized = True
-
-    def web_interface(self):
-        import pkg_resources
-        html_path = pkg_resources.resource_filename('whisperlivekit', 'web/live_transcription.html')
-        with open(html_path, "r", encoding="utf-8") as f:
-            html = f.read()
-        return html
+        TranscriptionEngine._initialized = True
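
`TranscriptionEngine` now carries its default configuration directly instead of re-running the argument parser, and it normalizes CLI-style kwargs (`no_transcription`, `no_vad`, and the `language` alias for `lan`); keys not supplied fall back to the defaults dict. A small sketch of that merge behavior, based on the logic shown above (passed values are illustrative, and constructing the engine will still load the configured backend):

```python
# Sketch of TranscriptionEngine's kwarg normalization (illustrative values).
from whisperlivekit import TranscriptionEngine

# CLI-style flags are translated into positive settings, and 'language'
# is accepted as an alias for 'lan'.
engine = TranscriptionEngine(model="tiny", language="en", no_vad=True)

assert engine.args.lan == "en"            # 'language' mapped to 'lan'
assert engine.args.vad is False           # no_vad=True -> vad=False
assert engine.args.transcription is True  # default kept; no_transcription not passed
```
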
 
 
 
 
 
 
 
whisperlivekit/parse_args.py ADDED
@@ -0,0 +1,141 @@
+
+from argparse import ArgumentParser
+
+def parse_args():
+    parser = ArgumentParser(description="Whisper FastAPI Online Server")
+    parser.add_argument(
+        "--host",
+        type=str,
+        default="localhost",
+        help="The host address to bind the server to.",
+    )
+    parser.add_argument(
+        "--port", type=int, default=8000, help="The port number to bind the server to."
+    )
+    parser.add_argument(
+        "--warmup-file",
+        type=str,
+        default=None,
+        dest="warmup_file",
+        help="""
+        The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast.
+        If not set, uses https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav.
+        If False, no warmup is performed.
+        """,
+    )
+
+    parser.add_argument(
+        "--confidence-validation",
+        action="store_true",
+        help="Accelerates validation of tokens using confidence scores. Transcription will be faster but punctuation might be less accurate.",
+    )
+
+    parser.add_argument(
+        "--diarization",
+        action="store_true",
+        default=False,
+        help="Enable speaker diarization.",
+    )
+
+    parser.add_argument(
+        "--no-transcription",
+        action="store_true",
+        help="Disable transcription to only see live diarization results.",
+    )
+
+    parser.add_argument(
+        "--min-chunk-size",
+        type=float,
+        default=0.5,
+        help="Minimum audio chunk size in seconds. It waits up to this time to do processing. If the processing takes shorter time, it waits, otherwise it processes the whole segment that was received by this time.",
+    )
+
+    parser.add_argument(
+        "--model",
+        type=str,
+        default="tiny",
+        help="Name size of the Whisper model to use (default: tiny). Suggested values: tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large,large-v3-turbo. The model is automatically downloaded from the model hub if not present in model cache dir.",
+    )
+
+    parser.add_argument(
+        "--model_cache_dir",
+        type=str,
+        default=None,
+        help="Overriding the default model cache dir where models downloaded from the hub are saved",
+    )
+    parser.add_argument(
+        "--model_dir",
+        type=str,
+        default=None,
+        help="Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.",
+    )
+    parser.add_argument(
+        "--lan",
+        "--language",
+        type=str,
+        default="auto",
+        help="Source language code, e.g. en,de,cs, or 'auto' for language detection.",
+    )
+    parser.add_argument(
+        "--task",
+        type=str,
+        default="transcribe",
+        choices=["transcribe", "translate"],
+        help="Transcribe or translate.",
+    )
+    parser.add_argument(
+        "--backend",
+        type=str,
+        default="faster-whisper",
+        choices=["faster-whisper", "whisper_timestamped", "mlx-whisper", "openai-api"],
+        help="Load only this backend for Whisper processing.",
+    )
+    parser.add_argument(
+        "--vac",
+        action="store_true",
+        default=False,
+        help="Use VAC = voice activity controller. Recommended. Requires torch.",
+    )
+    parser.add_argument(
+        "--vac-chunk-size", type=float, default=0.04, help="VAC sample size in seconds."
+    )
+
+    parser.add_argument(
+        "--no-vad",
+        action="store_true",
+        help="Disable VAD (voice activity detection).",
+    )
+
+    parser.add_argument(
+        "--buffer_trimming",
+        type=str,
+        default="segment",
+        choices=["sentence", "segment"],
+        help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.',
+    )
+    parser.add_argument(
+        "--buffer_trimming_sec",
+        type=float,
+        default=15,
+        help="Buffer trimming length threshold in seconds. If buffer length is longer, trimming sentence/segment is triggered.",
+    )
+    parser.add_argument(
+        "-l",
+        "--log-level",
+        dest="log_level",
+        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
+        help="Set the log level",
+        default="DEBUG",
+    )
+    parser.add_argument("--ssl-certfile", type=str, help="Path to the SSL certificate file.", default=None)
+    parser.add_argument("--ssl-keyfile", type=str, help="Path to the SSL private key file.", default=None)
+
+
+    args = parser.parse_args()
+
+    args.transcription = not args.no_transcription
+    args.vad = not args.no_vad
+    delattr(args, 'no_transcription')
+    delattr(args, 'no_vad')
+
+    return args
whisperlivekit/web/web_interface.py ADDED
@@ -0,0 +1,13 @@
+import logging
+import importlib.resources as resources
+
+logger = logging.getLogger(__name__)
+
+def get_web_interface_html():
+    """Loads the HTML for the web interface using importlib.resources."""
+    try:
+        with resources.files('whisperlivekit.web').joinpath('live_transcription.html').open('r', encoding='utf-8') as f:
+            return f.read()
+    except Exception as e:
+        logger.error(f"Error loading web interface HTML: {e}")
+        return "<html><body><h1>Error loading interface</h1></body></html>"