Update app.py
app.py CHANGED
@@ -17,28 +17,8 @@ InferenceClient(models[2]),
     InferenceClient(models[3]),
 ]
 
-
-
-COMPRESS_HISTORY="""You are an Information Summarizer Agent. Your duty is to summarize the following information into a more concise format with far less words.
-Retain all the main points and provide a brief and concise summary of the conversation.
-Converstion:
-{history}"""
-    print("COMPRESSING")
-    formatted_prompt=f"{COMPRESS_HISTORY.format(history=history[0-chat_mem:])}"
-    generate_kwargs = dict(
-        temperature=temp,
-        max_new_tokens=1024,
-        top_p=top_p,
-        repetition_penalty=rep_p,
-        do_sample=True,
-        seed=seed,
-    )
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-    for response in stream:
-        output += response.token.text
-    return output
-
+VERBOSE=True
+
 def format_prompt(message, history):
     prompt = ""
     if history:
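
Note: the deleted block is the orphaned body of a history-compression helper; no enclosing def survives in the old file (the commented-out call to compress_history removed further down is the only reference), so the code was dead and this commit simply drops it in favor of the new VERBOSE flag. For reference, a minimal sketch of the helper as it presumably once looked, reassembled from the deleted lines; the signature is inferred from that call site, and the clients lookup is an assumption, since the diff elides how client is resolved:

# Hypothetical reconstruction of the deleted helper. The signature mirrors the
# commented-out call compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem);
# `clients` stands in for the module-level list of InferenceClient objects (assumption).
def compress_history(history, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem):
    client = clients[client_choice]  # assumption: same lookup chat_inf uses
    print("COMPRESSING")
    formatted_prompt = COMPRESS_HISTORY.format(history=history[0 - chat_mem:])
    generate_kwargs = dict(
        temperature=temp,
        max_new_tokens=1024,
        top_p=top_p,
        repetition_penalty=rep_p,
        do_sample=True,
        seed=seed,
    )
    stream = client.text_generation(formatted_prompt, **generate_kwargs,
                                    stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
    return output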
@@ -49,10 +29,7 @@ def format_prompt(message, history):
         prompt += f"{bot_response}\n"
     #print(prompt)
     prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
-    #print(prompt)
     return prompt
-result = []
-
 
 def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem):
     #token max=8192
@@ -67,18 +44,11 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
     if memory:
         for ea in memory[0-chat_mem:]:
             hist_len+=len(str(ea))
-            print(hist_len)
     in_len=len(system_prompt+prompt)+hist_len
-
-
-    print("\n######### HIST "+str(in_len))
-    print("\n######### TOKENS "+str(tokens))
+
     if (in_len+tokens) > 8000:
         history.append((prompt,"Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
         yield history,memory
-        #hist=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem)
-        #yield [(prompt,"History has been compressed, processing request...")]
-        #history.append((prompt,hist))
     else:
         generate_kwargs = dict(
             temperature=temp,
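
Two notes on the retained lines above: memory[0-chat_mem:] is just memory[-chat_mem:] (keep the last chat_mem exchanges), and in_len counts characters, not tokens, even though it is weighed against the model's 8192-token budget. A hedged sketch of a closer estimate; the ~4 characters per token ratio is a rough rule of thumb for English text, not a property of these models:

recent = memory[-chat_mem:]                      # same slice as memory[0-chat_mem:]
in_len = len(system_prompt + prompt) + sum(len(str(ea)) for ea in recent)
approx_tokens = in_len // 4                      # assumption: ~4 chars per token
if approx_tokens + tokens > 8000:                # compare tokens to tokens
    ...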
@@ -88,9 +58,7 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
             do_sample=True,
             seed=seed,
         )
-        #formatted_prompt=prompt
         formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:])
-        print("\n######### PROMPT "+str(len(formatted_prompt)))
         stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream:
@@ -99,6 +67,10 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
         history.append((prompt,output))
         memory.append((prompt,output))
         yield history,memory
+        if VERBOSE==True:
+            print("\n######### HIST "+str(in_len))
+            print("\n######### TOKENS "+str(tokens))
+            print("\n######### PROMPT "+str(len(formatted_prompt)))
 
 def get_screenshot(chat: list,height=5000,width=600,chatblock=[],theme="light",wait=3000,header=True):
     print(chatblock)
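
The debug prints dropped from the hot path in the earlier hunks reappear here behind the new VERBOSE flag. The rendered diff loses indentation, so the block's nesting is ambiguous: placed inside the else branch (as reconstructed above) all three names are bound, but at function level formatted_prompt would be unbound on the too-many-tokens path and the last print would raise NameError once the generator resumes. A defensive sketch; the locals() guard is an assumption, not part of the commit:

if VERBOSE:                                       # bare truth test; == True is redundant
    print("\n######### HIST " + str(in_len))
    print("\n######### TOKENS " + str(tokens))
    if "formatted_prompt" in locals():            # assumed guard for the early-exit path
        print("\n######### PROMPT " + str(len(formatted_prompt)))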
@@ -110,18 +82,15 @@ def get_screenshot(chat: list,height=5000,width=600,chatblock=[],theme="light",w
     print(out)
     return out
 
-
-
 def clear_fn():
     return None,None,None
 rand_val=random.randint(1,1111111111111111)
+
 def check_rand(inp,val):
     if inp==True:
         return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1,1111111111111111))
     else:
         return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
-
-
 
 with gr.Blocks() as app:
     memory=gr.State()
@@ -140,7 +109,6 @@ with gr.Blocks() as app:
             stop_btn=gr.Button("Stop")
             clear_btn=gr.Button("Clear")
         client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],value=models[0],interactive=True)
-
         with gr.Column(scale=1):
             with gr.Group():
                 rand = gr.Checkbox(label="Random Seed", value=True)
@@ -162,14 +130,10 @@ with gr.Blocks() as app:
             wait_time=gr.Number(label="Wait Time",value=3000)
             theme=gr.Radio(label="Theme", choices=["light","dark"],value="light")
             chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=[c for c in range(1,40)],multiselect=True)
-
-
 
     im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
     chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem],[chat_b,memory])
-
     go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem],[chat_b,memory])
-
     stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
-    clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b])
+    clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
 app.queue(default_concurrency_limit=10).launch()
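
The one functional change in this hunk adds memory to the clear button's output components, so clearing the chat also resets the rolling memory. Note that clear_fn, unchanged above, still returns three values for what are now four outputs; Gradio rejects callbacks that return fewer values than they have output components, so the helper presumably needs a fourth None. A minimal sketch:

def clear_fn():
    # one value per output component: inp, sys_inp, chat_b, memory
    return None, None, None, None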