Omnibus committed
Commit 0045bd6
1 Parent(s): 75f7be4

Update app.py

Files changed (1): app.py (+13 -8)
app.py CHANGED
@@ -40,7 +40,7 @@ def compress_history(history,temperature=temperature,top_p=top_p,repetition_pena
 
 MAX_HISTORY=100
 
-def generate(prompt, history,max_new_tokens,temperature=temperature,top_p=top_p,repetition_penalty=repetition_penalty):
+def generate(prompt, history,max_new_tokens,health,temperature=temperature,top_p=top_p,repetition_penalty=repetition_penalty):
     temperature = float(temperature)
     if temperature < 1e-2:
         temperature = 1e-2
@@ -65,7 +65,7 @@ def generate(prompt, history,max_new_tokens,temperature=temperature,top_p=top_p,
     print(f'cnt:: {cnt}')
     if cnt > MAX_HISTORY:
         history1 = compress_history(str(history), temperature, max_new_tokens, top_p, repetition_penalty)
-    formatted_prompt = format_prompt(f"{GAME_MASTER.format(history=history1)}, {prompt}", history)
+    formatted_prompt = format_prompt(f"{GAME_MASTER.format(history=history1,health=health)}, {prompt}", history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
@@ -79,8 +79,13 @@ def generate(prompt, history,max_new_tokens,temperature=temperature,top_p=top_p,
     lines = output.strip().strip("\n").split("\n")
     #history=""
     for i,line in enumerate(lines):
-        if line.startswith("1. "):
-            print(line)
+        if line.startswith("Health: "):
+            try:
+                new_health = line.split("Health: ")[1]
+                health+=int(new_health)
+                print(health)
+            except Exception as e:
+                print (f'{health}--Error :: {e}')
         if line.startswith("2. "):
             print(line)
         if line.startswith("3. "):
@@ -92,9 +97,9 @@ def generate(prompt, history,max_new_tokens,temperature=temperature,top_p=top_p,
 
     if history:
         history.append((prompt,output))
-        yield "",history
+        yield "",history,health
     else:
-        yield "",[(prompt,output)]
+        yield "",[(prompt,output)],health
 
 def clear_fn():
     return None,None
@@ -114,12 +119,12 @@ with gr.Blocks() as app:
     with gr.Row():
         tokens = gr.Slider(label="Max new tokens",value=1048,minimum=0,maximum=1048*10,step=64,interactive=True,info="The maximum numbers of new tokens")
     json_out=gr.JSON()
-    e_box=gr.Textbox()
+    health=gr.Number(value=100)
     #text=gr.JSON()
     #inp_query.change(search_models,inp_query,models_dd)
     #test_b=test_btn.click(itt,url,e_box)
     clear_btn.click(clear_fn,None,[prompt,chatbot])
-    go=button.click(generate,[prompt,chatbot,tokens],[prompt,chatbot])
+    go=button.click(generate,[prompt,chatbot,tokens,health],[prompt,chatbot,health])
     stop_button.click(None,None,None,cancels=[go])
 app.launch(show_api=False)
 
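The loop change is the core of the commit: instead of echoing the model's numbered option lines, the handler now scans the streamed output for a `Health: ` prefix and adds the parsed integer to the running total, so the model is expected to report health as a signed delta (e.g. `Health: -10`). A minimal standalone sketch of that step (the function name and the delta convention are assumptions, not part of the commit):

```python
# Standalone sketch of the health-parsing step added in this commit.
# Assumes the model emits signed deltas ("Health: -10"); the commit adds the
# parsed value to the running total rather than replacing it.
def apply_health_lines(output: str, health: int) -> int:
    for line in output.strip().split("\n"):
        if line.startswith("Health: "):
            try:
                health += int(line.split("Health: ")[1])
            except (ValueError, IndexError) as e:
                # Same fallback as the commit: log and keep the old value.
                print(f"{health}--Error :: {e}")
    return health

print(apply_health_lines("You take a hit.\nHealth: -10", 100))  # -> 90
```

Note that if the model reports absolute values instead (`Health: 90`), the `+=` would double-count; plain assignment (`health = int(...)`) would be the fix in that case.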
 
 
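On the UI side, the commit threads the new value through a `gr.Number` that appears in both the input and output lists of `button.click`, so the updated total is written back to the widget and fed into the next turn. A minimal sketch of that round-trip pattern, assuming a Gradio version where `Chatbot` accepts `(user, bot)` tuple pairs as in the app above (handler and variable names here are illustrative):

```python
import gradio as gr

def step(msg, chat, health):
    # Append the new turn and apply a stand-in for the parsed "Health: " delta.
    chat = (chat or []) + [(msg, f"ok (health={health})")]
    yield "", chat, health - 5

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    prompt = gr.Textbox()
    health = gr.Number(value=100)
    btn = gr.Button("Go")
    # The Number is both an input and an output, so it persists across clicks.
    btn.click(step, [prompt, chatbot, health], [prompt, chatbot, health])

demo.launch()
```

Using a visible `gr.Number` rather than `gr.State` keeps the running total on screen and lets the player edit it directly, at the cost of exposing it as a free-form input.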