eaglelandsonce committed on
Commit
524a7b5
·
1 Parent(s): def6d9f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -185,11 +185,8 @@ def main():
185
  filename = generate_filename(f"{st.session_state.text_content}_section_{i+1}", choice)
186
  create_file(filename, st.session_state.text_content, response)
187
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
188
-
189
 
190
 
191
-
192
-
193
  if st.button('💬 Chat'):
194
  st.write('Reasoning with your inputs...')
195
  response = chat_with_model(st.session_state.text_content, ''.join(list(document_sections)))
@@ -201,7 +198,7 @@ def main():
201
  #st.write(response)
202
  filename = generate_filename(st.session_state.text_content, choice)
203
  create_file(filename, st.session_state.text_content, response)
204
- st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
205
 
206
  # with st.sidebar.expander("Recording Options", expanded=False):
207
  # Audio, transcribe, GPT:
@@ -209,7 +206,8 @@ def main():
209
  if filename is not None:
210
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
211
  # push transcript through as prompt
212
- st.session_state.text_content = transcription
 
213
 
214
 
215
 
 
185
  filename = generate_filename(f"{st.session_state.text_content}_section_{i+1}", choice)
186
  create_file(filename, st.session_state.text_content, response)
187
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
188
 
189
 
 
 
190
  if st.button('💬 Chat'):
191
  st.write('Reasoning with your inputs...')
192
  response = chat_with_model(st.session_state.text_content, ''.join(list(document_sections)))
 
198
  #st.write(response)
199
  filename = generate_filename(st.session_state.text_content, choice)
200
  create_file(filename, st.session_state.text_content, response)
201
+
202
 
203
  # with st.sidebar.expander("Recording Options", expanded=False):
204
  # Audio, transcribe, GPT:
 
206
  if filename is not None:
207
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
208
  # push transcript through as prompt
209
+ json_str2 = json.dumps(transcription, indent=4)
210
+ st.session_state.text_content = json_str2
211
 
212
 
213