Update app.py
app.py
CHANGED
@@ -563,7 +563,7 @@ def read_file_content(file,max_length):
 
 # 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
 @st.cache_resource
-def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
+def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
     model = model_choice
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
@@ -760,34 +760,34 @@ def whisper_main():
     filename = save_and_play_audio(audio_recorder)
     if filename is not None:
         transcription = transcribe_audio(filename)
-        try:
-
-
-
-
-            # Whisper to GPT: New!! ---------------------------------------------------------------------
-            st.write('Reasoning with your inputs with GPT..')
-            response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
-            st.write('Response:')
-            st.write(response)
-            filename = generate_filename(user_prompt, choice)
-            create_file(filename, user_prompt, response, should_save)
-            # Whisper to GPT: New!! ---------------------------------------------------------------------
-
+        #try:
+        transcript = transcription['text']
+        st.write(transcript)
 
-
-
-
-
-
-
-
-
-
+        # Whisper to GPT: New!! ---------------------------------------------------------------------
+        st.write('Reasoning with your inputs with GPT..')
+        response = chat_with_model(transcript)
+        st.write('Response:')
+        st.write(response)
+        filename = generate_filename(transcript, "txt")
+        create_file(filename, transcript, response, should_save)
+        # Whisper to GPT: New!! ---------------------------------------------------------------------
+
 
-
-
+        # Whisper to Llama:
+        response = StreamLLMChatResponse(transcript)
+        filename_txt = generate_filename(transcript, ".txt")
+        create_file(filename_txt, transcript, response, should_save)
+        filename_wav = filename_txt.replace('.txt', '.wav')
+        import shutil
+        shutil.copyfile(filename, filename_wav)
+        if os.path.exists(filename):
+            os.remove(filename)
+
+        #except:
+        #    st.write('Starting Whisper Model on GPU. Please retry in 30 seconds.')
 
 
 import streamlit as st
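
For readers skimming the diff: the commit replaces the GPT branch's inputs (prompt_section, document_sections, user_prompt, choice, model_choice, none of which appear anywhere in this hunk of whisper_main) with the Whisper transcript itself, which the new document_section='' default makes a valid one-argument call. Below is a minimal, self-contained sketch of the resulting flow; every helper body is a stand-in assumption, since the real chat_with_model, StreamLLMChatResponse, generate_filename, and create_file live elsewhere in app.py, and only the control flow is taken from the diff.

# Sketch of the flow this commit leaves in whisper_main():
# STT transcript -> GPT -> Llama -> saved .txt plus a paired .wav copy.
# All helpers are stand-ins for app.py's functions of the same names.
import os
import re
import shutil
from datetime import datetime

def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
    # Stand-in: app.py calls the OpenAI chat API here. The commit's change
    # is the document_section='' default, allowing one-argument calls.
    return f"[{model_choice}] {prompt[:40]}"

def StreamLLMChatResponse(prompt):
    # Stand-in for app.py's streaming Llama endpoint.
    return f"[llama] {prompt[:40]}"

def generate_filename(prompt, file_type):
    # Stand-in: timestamped slug; accepts "txt" or ".txt", since the diff's
    # GPT branch passes "txt" while its Llama branch passes ".txt".
    slug = re.sub(r'\W+', '_', prompt).strip('_')[:30]
    return f"{datetime.now():%Y%m%d_%H%M%S}_{slug}.{file_type.lstrip('.')}"

def create_file(filename, prompt, response, should_save=True):
    if should_save:
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(prompt + '\n\n' + response)

def handle_transcription(audio_path, transcription, should_save=True):
    transcript = transcription['text']            # Whisper's text field
    response = chat_with_model(transcript)        # Whisper -> GPT branch
    response = StreamLLMChatResponse(transcript)  # Whisper -> Llama branch
    # Note: as in the diff, the Llama response overwrites the GPT response
    # before the final save below.
    filename_txt = generate_filename(transcript, '.txt')
    create_file(filename_txt, transcript, response, should_save)
    # Keep the recording under the same basename as its transcript,
    # then remove the temporary audio file.
    filename_wav = filename_txt.replace('.txt', '.wav')
    shutil.copyfile(audio_path, filename_wav)
    if os.path.exists(audio_path):
        os.remove(audio_path)
    return filename_txt, filename_wav

One behavioral consequence worth noting: the commit comments out the surrounding try/except (#try: / #except:), so the old "Starting Whisper Model on GPU. Please retry in 30 seconds." fallback no longer fires, and any transcription failure now surfaces as a raw exception in the Streamlit page.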