Omnibus committed on
Commit
f74448c
·
verified ·
1 Parent(s): b315463

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -45,11 +45,11 @@ def format_prompt(message, history):
45
  #<start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
46
  for user_prompt, bot_response in history:
47
  prompt += f"{user_prompt}\n"
48
- print(prompt)
49
  prompt += f"{bot_response}\n"
50
- print(prompt)
51
  prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
52
- print(prompt)
53
  return prompt
54
 
55
 
@@ -64,7 +64,8 @@ def chat_inf(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,r
64
  hist_len=len(history)
65
  print(hist_len)
66
  in_len=len(system_prompt+prompt)+hist_len
67
- print("\n#########"+str(in_len))
 
68
  if (in_len+tokens) > 8000:
69
  yield [(prompt,"Wait. I need to compress our Chat history...")]
70
  history=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p)
 
45
  #<start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
46
  for user_prompt, bot_response in history:
47
  prompt += f"{user_prompt}\n"
48
+ #print(prompt)
49
  prompt += f"{bot_response}\n"
50
+ #print(prompt)
51
  prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
52
+ #print(prompt)
53
  return prompt
54
 
55
 
 
64
  hist_len=len(history)
65
  print(hist_len)
66
  in_len=len(system_prompt+prompt)+hist_len
67
+ print("\n######### HIST "+str(in_len))
68
+ print("\n######### TOKENS "+str(tokens))
69
  if (in_len+tokens) > 8000:
70
  yield [(prompt,"Wait. I need to compress our Chat history...")]
71
  history=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p)