lucas-ventura commited on
Commit
9ed896f
·
1 Parent(s): 42a8516

Update with dockerfile

Browse files
dockerfile ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# docker build -t whisper-webui --build-arg WHISPER_IMPLEMENTATION=faster-whisper .

FROM huggingface/transformers-pytorch-gpu
EXPOSE 7860

# Selects which requirements file is installed below; also exported at runtime.
ARG WHISPER_IMPLEMENTATION=faster-whisper
ENV WHISPER_IMPLEMENTATION=${WHISPER_IMPLEMENTATION}

ADD . /opt/whisper-webui/

# Latest version of transformers-pytorch-gpu seems to lack tk.
# Further, pip install fails, so we must upgrade pip first.
# `apt-get update` must precede `install`: the base image's package lists may be
# stale or absent, making a bare `apt-get install` fail. Cleaning the lists in
# the same RUN keeps them out of the image layer.
RUN apt-get update && \
    apt-get install -y python3-tk && \
    rm -rf /var/lib/apt/lists/*
RUN python3 -m pip install --upgrade pip

# Install the requirements matching the chosen whisper implementation.
RUN if [ "${WHISPER_IMPLEMENTATION}" = "whisper" ]; then \
        python3 -m pip install -r /opt/whisper-webui/requirements-whisper.txt; \
    else \
        python3 -m pip install -r /opt/whisper-webui/requirements-fasterWhisper.txt; \
    fi

# Note: Models will be downloaded on demand to the directory /root/.cache/whisper.
# You can also bind this directory in the container to somewhere on the host.

# To be able to see logs in real time
ENV PYTHONUNBUFFERED=1

WORKDIR /opt/whisper-webui/
ENTRYPOINT ["python3"]
# NOTE(review): --whisper_implementation is hard-coded to "faster-whisper" here,
# overriding the WHISPER_IMPLEMENTATION build arg when the image was built with
# WHISPER_IMPLEMENTATION=whisper. Exec-form CMD cannot expand ${WHISPER_IMPLEMENTATION};
# if app.py reads the env var, drop this flag — TODO confirm against app.py.
CMD ["app.py", "--whisper_implementation", "faster-whisper", "--input_audio_max_duration", "-1", "--server_name", "0.0.0.0", "--auto_parallel", "True"]
pre-requirements.txt DELETED
@@ -1,5 +0,0 @@
1
- ctranslate2==4.4.0
2
- torch
3
- torchaudio
4
- nvidia-cublas-cu12
5
- nvidia-cudnn-cu12==9.*
 
 
 
 
 
 
requirements-fasterWhisper.txt ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ transformers>=4.45.2
2
+ ctranslate2>=4.4.0
3
+ faster-whisper>=1.0.3
4
+ ffmpeg-python==0.2.0
5
+ gradio==5.9.1
6
+ yt-dlp
7
+ json5
8
+ torch
9
+ torchaudio
10
+ more_itertools
11
+ zhconv
12
+ sentencepiece
13
+
14
+ pyannote.audio
15
+ torchmetrics
16
+ lightning
17
+ hydra-core==1.3.2
18
+ hydra-colorlog==1.2.0
19
+ llama-cookbook
20
+ wandb
21
+ rich
22
+ git+https://github.com/lucas-ventura/lutils.git
23
+ git+https://github.com/idriscnrs/idr_torch.git
24
+ pycocoevalcap
25
+ prettytable
26
+ hf_transfer
27
+ bitsandbytes
28
+
29
+ # Needed by ALMA-GPTQ
30
+ accelerate
31
+ auto-gptq
32
+ optimum
33
+ # Needed by ALMA-GGUF
34
+ ctransformers[cuda]
35
+ # Needed by load_in_4bit parameters in transformers
36
+ bitsandbytes
requirements-whisper.txt ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ transformers>=4.45.2
2
+ ctranslate2>=4.4.0
3
+ git+https://github.com/openai/whisper.git
4
+ ffmpeg-python==0.2.0
5
+ gradio==5.9.1
6
+ yt-dlp
7
+ json5
8
+ torch
9
+ torchaudio
10
+ altair
11
+ zhconv
12
+ sentencepiece
13
+
14
+ pyannote.audio
15
+ torchmetrics
16
+ lightning
17
+ hydra-core==1.3.2
18
+ hydra-colorlog==1.2.0
19
+ llama-cookbook
20
+ wandb
21
+ rich
22
+ git+https://github.com/lucas-ventura/lutils.git
23
+ git+https://github.com/idriscnrs/idr_torch.git
24
+ pycocoevalcap
25
+ prettytable
26
+ hf_transfer
27
+ bitsandbytes
28
+
29
+ # Needed by ALMA-GPTQ
30
+ accelerate
31
+ auto-gptq
32
+ optimum
33
+ # Needed by ALMA-GGUF
34
+ ctransformers[cuda]
35
+ # Needed by load_in_4bit parameters in transformers
36
+ bitsandbytes
requirements.txt CHANGED
@@ -1,3 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  pyannote.audio
2
  torchmetrics
3
  lightning
@@ -11,8 +24,13 @@ git+https://github.com/idriscnrs/idr_torch.git
11
  pycocoevalcap
12
  prettytable
13
  hf_transfer
14
- faster-whisper
15
- ctranslate2==4.4.0
16
- gradio
17
  bitsandbytes
18
- yt-dlp
 
 
 
 
 
 
 
 
 
1
+ transformers>=4.45.2
2
+ ctranslate2>=4.4.0
3
+ faster-whisper>=1.0.3
4
+ ffmpeg-python==0.2.0
5
+ gradio==5.9.1
6
+ yt-dlp
7
+ json5
8
+ torch
9
+ torchaudio
10
+ more_itertools
11
+ zhconv
12
+ sentencepiece
13
+
14
  pyannote.audio
15
  torchmetrics
16
  lightning
 
24
  pycocoevalcap
25
  prettytable
26
  hf_transfer
 
 
 
27
  bitsandbytes
28
+
29
+ # Needed by ALMA-GPTQ
30
+ accelerate
31
+ auto-gptq
32
+ optimum
33
+ # Needed by ALMA-GGUF
34
+ ctransformers[cuda]
35
+ # Needed by load_in_4bit parameters in transformers
36
+ bitsandbytes