Anmol Gupta committed on
Commit
842813f
·
1 Parent(s): e112b4a

make error

Browse files
Files changed (3) hide show
  1. Makefile +27 -0
  2. app.py +47 -0
  3. requirements.txt +3 -0
Makefile ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Build/CI helper targets for this project.
# FIX: declare every target .PHONY — without it, a file or directory named
# "test", "install", etc. in the repo root would silently shadow the target
# and make would report "up to date" instead of running the recipe.
.PHONY: install test debug one-test debugthree format lint all

install:
	pip install --upgrade pip &&\
		pip install -r requirements.txt

test:
	python -m pytest -vvv --cov=hello --cov=greeting \
		--cov=smath --cov=web tests
	python -m pytest --nbval notebook.ipynb #tests our jupyter notebook
	#python -m pytest -v tests/test_web.py #if you just want to test web

debug:
	python -m pytest -vv --pdb #Debugger is invoked

one-test:
	python -m pytest -vv tests/test_greeting.py::test_my_name4

debugthree:
	#not working the way I expect
	python -m pytest -vv --pdb --maxfail=4 # drop to PDB for first three failures

format:
	black *.py

lint:
	pylint --disable=R,C *.py

all: install lint test format
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio web UI: transcribe microphone audio with OpenAI Whisper, then
relay the transcript to ChatGPT and display both the transcript and the
ChatGPT reply."""

import os
import time  # NOTE(review): unused in this file — kept to avoid breaking unseen callers; confirm and drop
import warnings  # NOTE(review): unused in this file — confirm and drop

import whisper
import gradio as gr
from pyChatGPT import ChatGPT

# Load the Whisper "base" model once at import time so every request reuses it.
model = whisper.load_model("base")


def transcribe(audio):
    """Transcribe *audio* (a filepath from the Gradio mic widget) and send
    the transcript to ChatGPT.

    Returns a two-element list: [whisper transcript, ChatGPT reply].
    """
    # load audio and pad/trim it to fit 30 seconds (Whisper's window size)
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # detect the spoken language (probabilities unused; decode handles language)
    _, probs = model.detect_language(mel)

    # decode the audio with default options
    options = whisper.DecodingOptions()
    result = whisper.decode(model, mel, options)
    result_text = result.text

    # Pass the generated text to ChatGPT.
    # SECURITY FIX: the account email and password were hard-coded in source
    # (and therefore leaked in version control — rotate that password).
    # Read credentials from the environment instead; KeyError here gives a
    # clear failure when they are not configured.
    chatgpt_api = ChatGPT(
        email=os.environ["CHATGPT_EMAIL"],
        password=os.environ["CHATGPT_PASSWORD"],
    )
    resp = chatgpt_api.send_message(result_text)
    out_result = resp['message']

    return [result_text, out_result]


output_1 = gr.Textbox(label="Speech to Text")
output_2 = gr.Textbox(label="ChatGPT Output")


gr.Interface(
    title = 'OpenAI Whisper and ChatGPT ASR Gradio Web UI',
    fn=transcribe,
    inputs=[
        # NOTE(review): gr.inputs.* is the legacy (pre-3.x) Gradio namespace;
        # kept as-is for compatibility with the pinned environment.
        gr.inputs.Audio(source="microphone", type="filepath")
    ],

    outputs=[
        output_1, output_2
    ],
    live=True).launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ git+https://github.com/openai/whisper.git
3
+ PyChatGPT