RamAnanth1 commited on
Commit
164ab71
·
1 Parent(s): f484acc

Revert to not passing the API as an argument

Browse files
Files changed (1) hide show
  1. app.py +13 -24
app.py CHANGED
@@ -6,7 +6,8 @@ import random
6
  from transformers import pipeline
7
  import torch
8
 
9
- session_token = os.environ.get('SessionToken')
 
10
 
11
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
12
 
@@ -21,41 +22,29 @@ all_special_ids = whisper_model.tokenizer.all_special_ids
21
  transcribe_token_id = all_special_ids[-5]
22
  translate_token_id = all_special_ids[-6]
23
 
24
- def get_api():
25
- api = None
26
- try:
27
- api = ChatGPT(session_token)
28
- # api.refresh_auth()
29
- except:
30
- api = None
31
- return api
32
-
33
  def translate_or_transcribe(audio, task):
34
  whisper_model.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="Transcribe in Spoken Language" else translate_token_id]]
35
  text = whisper_model(audio)["text"]
36
  return text
37
 
38
- def get_response_from_chatbot(api,text):
39
- if api is None:
40
- return "Sorry, the chatGPT API has some issues. Please try again later"
41
  try:
42
- resp = api.send_message(text)
43
- api.refresh_auth()
44
- # api.reset_conversation()
45
- response = resp['message']
46
  except:
47
- response = "Sorry, the chatGPT queue is full. Please try again later"
48
  return response
49
 
50
- def chat(api,message, chat_history):
51
  out_chat = []
52
  if chat_history != '':
53
  out_chat = json.loads(chat_history)
54
- response = get_response_from_chatbot(api,message)
55
  out_chat.append((message, response))
56
  chat_history = json.dumps(out_chat)
57
  logger.info(f"out_chat_: {len(out_chat)}")
58
- return api,out_chat, chat_history
59
 
60
  start_work = """async() => {
61
  function isMobile() {
@@ -184,10 +173,10 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
184
  outputs=prompt_input
185
  )
186
 
187
- api = gr.State(value=get_api())
188
  submit_btn.click(fn=chat,
189
- inputs=[api,prompt_input, chat_history],
190
- outputs=[api,chatbot, chat_history],
191
  )
192
  gr.HTML('''
193
  <p>Note: Please be aware that audio records from iOS devices will not be decoded as expected by Gradio. For the best experience, record your voice from a computer instead of your smartphone ;)</p>
 
6
  from transformers import pipeline
7
  import torch
8
 
9
+ session_token = os.environ.get('SessionToken')
10
+ api = ChatGPT(session_token)
11
 
12
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
13
 
 
22
  transcribe_token_id = all_special_ids[-5]
23
  translate_token_id = all_special_ids[-6]
24
 
 
 
 
 
 
 
 
 
 
25
  def translate_or_transcribe(audio, task):
26
  whisper_model.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="Transcribe in Spoken Language" else translate_token_id]]
27
  text = whisper_model(audio)["text"]
28
  return text
29
 
30
+ def get_response_from_chatbot(text):
 
 
31
  try:
32
+ resp = api.send_message(text)
33
+ response = resp['message']
34
+ # logger.info(f"response_: {response}")
 
35
  except:
36
+ response = "Sorry, the chatGPT queue is full. Please try again in some time"
37
  return response
38
 
39
+ def chat(message, chat_history):
40
  out_chat = []
41
  if chat_history != '':
42
  out_chat = json.loads(chat_history)
43
+ response = get_response_from_chatbot(message)
44
  out_chat.append((message, response))
45
  chat_history = json.dumps(out_chat)
46
  logger.info(f"out_chat_: {len(out_chat)}")
47
+ return out_chat, chat_history
48
 
49
  start_work = """async() => {
50
  function isMobile() {
 
173
  outputs=prompt_input
174
  )
175
 
176
+
177
  submit_btn.click(fn=chat,
178
+ inputs=[prompt_input, chat_history],
179
+ outputs=[chatbot, chat_history],
180
  )
181
  gr.HTML('''
182
  <p>Note: Please be aware that audio records from iOS devices will not be decoded as expected by Gradio. For the best experience, record your voice from a computer instead of your smartphone ;)</p>