Update app.py
app.py
CHANGED
@@ -1,528 +1,179 @@
  import gradio as gr
- import json
- import os
- import random
- import requests
[old lines 6-15 are not rendered in the diff view; judging from the code below, they defined API_URL, DEBUG, INDEX_DESCS, the MAX_* limits and their defaults, and opened process() with its request-payload dict]
-     }
-     data.update(kwargs)
-     print(json.dumps(data))
-     if API_URL is None:
-         raise ValueError(f'API_URL envvar is not set!')
-     try:
-         response = requests.post(API_URL, json=data, timeout=10)
-     except requests.exceptions.Timeout:
-         raise ValueError('Web request timed out. Please try again later.')
-     except requests.exceptions.RequestException as e:
-         raise ValueError(f'Web request error: {e}')
-     if response.status_code == 200:
-         result = response.json()
-     else:
-         raise ValueError(f'HTTP error {response.status_code}: {response.json()}')
-     if DEBUG:
-         print(result)
-     return result
-
- def format_tokenization_info(result):
-     if not ('token_ids' in result and 'tokens' in result):
-         return ''
-     token_ids = result['token_ids']
-     tokens = result['tokens']
-     if type(token_ids) == list and all([type(token_id) == int for token_id in token_ids]):
-         output = '[' + " ".join(['"' + token.replace('Ġ', ' ') + '"' for token in tokens]) + '] ' + str(token_ids)
-     else:
-         ttt = []
-         for token_idss, tokenss in zip(token_ids, tokens):
-             tt = []
-             for token_ids, tokens in zip(token_idss, tokenss):
-                 t = '[' + " ".join(['"' + token.replace('Ġ', ' ') + '"' for token in tokens]) + '] ' + str(token_ids)
-                 tt.append(t)
-             tt = '\n'.join(tt)
-             ttt.append(tt)
-         output = '\n\n'.join(ttt)
-     return output
- def format_doc_metadata(doc):
-     formatted = f'Document #{doc["doc_ix"]}\n'
-     if doc['doc_len'] == doc['disp_len']:
-         formatted += f'Length: {doc["doc_len"]} tokens\n'
-     else:
-         formatted += f'Length: {doc["doc_len"]} tokens ({doc["disp_len"]} tokens displayed)\n'
-     metadata = doc['metadata'].strip("\n")
-     formatted += f'Metadata: {metadata}'
-     return formatted
-
- def count(index_desc, query, max_clause_freq, max_diff_tokens):
-     if ' AND ' in query or ' OR ' in query: # CNF query
-         result = process('count', index_desc, query=query, max_clause_freq=max_clause_freq, max_diff_tokens=max_diff_tokens)
-     else: # simple query
-         result = process('count', index_desc, query=query)
-     latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
-     tokenization_info = format_tokenization_info(result)
-     if 'error' in result:
-         count = result['error']
-     else:
-         count = f'{result["count"]:,}'
-     return latency, tokenization_info, count
-
- def prob(index_desc, query):
-     result = process('prob', index_desc, query=query)
-     latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
-     tokenization_info = format_tokenization_info(result)
-     if 'error' in result:
-         prob = result['error']
-     elif result['prompt_cnt'] == 0:
-         prob = '(n-1)-gram is not found in the corpus'
-     else:
-         prob = f'{result["prob"]:.4f} ({result["cont_cnt"]:,} / {result["prompt_cnt"]:,})'
-     return latency, tokenization_info, prob
-
- def ntd(index_desc, query, max_support):
-     result = process('ntd', index_desc, query=query, max_support=max_support)
-     latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
-     tokenization_info = format_tokenization_info(result)
-     if 'error' in result:
-         ntd = result['error']
-     else:
-         result_by_token_id = result['result_by_token_id']
-         ntd = {}
-         for token_id, r in result_by_token_id.items():
-             ntd[f'{r["token"]} ({r["cont_cnt"]} / {result["prompt_cnt"]})'] = r['prob']
-         if ntd == {}:
-             ntd = '(n-1)-gram is not found in the corpus'
-     return latency, tokenization_info, ntd
-
- def infgram_prob(index_desc, query):
-     result = process('infgram_prob', index_desc, query=query)
-     latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
-     tokenization_info = format_tokenization_info(result)
-     if 'error' in result:
-         longest_suffix = ''
-         prob = result['error']
-     else:
-         longest_suffix = result['longest_suffix']
-         prob = f'{result["prob"]:.4f} ({result["cont_cnt"]:,} / {result["prompt_cnt"]:,})'
-     return latency, tokenization_info, longest_suffix, prob
-
- def infgram_ntd(index_desc, query, max_support):
-     result = process('infgram_ntd', index_desc, query=query, max_support=max_support)
-     latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
-     tokenization_info = format_tokenization_info(result)
-     if 'error' in result:
-         longest_suffix = ''
-         ntd = result['error']
-     else:
-         longest_suffix = result['longest_suffix']
-         result_by_token_id = result['result_by_token_id']
-         ntd = {}
-         for token_id, r in result_by_token_id.items():
-             ntd[f'{r["token"]} ({r["cont_cnt"]} / {result["prompt_cnt"]})'] = r['prob']
-     return latency, tokenization_info, longest_suffix, ntd
-
- def search_docs(index_desc, query, maxnum, max_disp_len, max_clause_freq, max_diff_tokens):
-     if ' AND ' in query or ' OR ' in query: # CNF query
-         result = process('search_docs', index_desc, query=query, maxnum=maxnum, max_disp_len=max_disp_len, max_clause_freq=max_clause_freq, max_diff_tokens=max_diff_tokens)
-     else: # simple query
-         result = process('search_docs', index_desc, query=query, maxnum=maxnum, max_disp_len=max_disp_len)
-     latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
-     tokenization_info = format_tokenization_info(result)
-     if 'error' in result:
-         message = result['error']
-         metadatas = ['' for _ in range(MAXNUM)]
-         docs = [[] for _ in range(MAXNUM)]
-     else:
-         message = result['message']
-         metadatas = [format_doc_metadata(doc) for doc in result['documents']]
-         docs = [doc['spans'] for doc in result['documents']]
-     metadatas = metadatas[:maxnum]
-     docs = docs[:maxnum]
-     while len(metadatas) < MAXNUM:
-         metadatas.append('')
-     while len(docs) < MAXNUM:
-         docs.append([])
-     return tuple([latency, tokenization_info, message] + metadatas + docs)
-
- def search_docs_new(index_desc, query, max_disp_len, max_clause_freq, max_diff_tokens, state):
-     if ' AND ' in query or ' OR ' in query: # CNF query
-         find_result = process('find_cnf', index_desc, query=query, max_clause_freq=max_clause_freq, max_diff_tokens=max_diff_tokens)
-         find_result['type'] = 'cnf'
-     else: # simple query
-         find_result = process('find', index_desc, query=query)
-         find_result['type'] = 'simple'
-
-     state = find_result
-
-     latency = '' if 'latency' not in find_result else f'{find_result["latency"]:.3f}'
-     tokenization_info = format_tokenization_info(find_result)
-     if 'error' in find_result:
-         message = find_result['error']
-         idx = gr.Number(minimum=0, maximum=0, step=1, value=0, interactive=False)
-         metadata = ''
-         doc = []
-         return latency, tokenization_info, message, idx, metadata, doc, state
-
-     if ' AND ' in query or ' OR ' in query: # CNF query
-         ptrs_by_shard = find_result['ptrs_by_shard']
-         cnt_retrievable = sum([len(ptrs) for ptrs in ptrs_by_shard])
-         if find_result["approx"]:
-             message = f'Approximately {find_result["cnt"]} occurrences found, of which {cnt_retrievable} are retrievable'
-         else:
-             message = f'{find_result["cnt"]} occurrences found'
-     else: # simple query
-         message = f'{find_result["cnt"]} occurrences found'
-         cnt_retrievable = find_result['cnt']
-     if cnt_retrievable == 0:
-         idx = gr.Number(minimum=0, maximum=0, step=1, value=0, interactive=False)
-         metadata = ''
-         doc = []
-         return latency, tokenization_info, message, idx, metadata, doc, state
-     idx = random.randint(0, cnt_retrievable-1)
-     metadata, doc = get_another_doc(index_desc, idx, max_disp_len, state)
-     idx = gr.Number(minimum=0, maximum=cnt_retrievable-1, step=1, value=idx, interactive=True)
-     return latency, tokenization_info, message, idx, metadata, doc, state
-
- def clear_search_docs_new(state):
-     state = None
-     idx = gr.Number(minimum=0, maximum=0, step=1, value=0, interactive=False)
-     return idx, state
-
- def get_another_doc(index_desc, idx, max_disp_len, state):
-     find_result = state
-     if find_result is None or not (type(idx) == int and 0 <= idx and idx < find_result['cnt']):
-         metadata = ''
-         doc = []
-         return metadata, doc
-     if find_result['type'] == 'cnf':
-         ptrs_by_shard = find_result['ptrs_by_shard']
-         cnt_by_shard = [len(ptrs) for ptrs in ptrs_by_shard]
-         s = 0
-         while idx >= cnt_by_shard[s]:
-             idx -= cnt_by_shard[s]
-             s += 1
-         ptr = ptrs_by_shard[s][idx]
-         result = process('get_doc_by_ptr', index_desc, s=s, ptr=ptr, max_disp_len=max_disp_len, query_ids=find_result['token_ids'])
-     else: # simple query
-         segment_by_shard = find_result['segment_by_shard']
-         cnt_by_shard = [end - start for (start, end) in segment_by_shard]
-         s = 0
-         while idx >= cnt_by_shard[s]:
-             idx -= cnt_by_shard[s]
-             s += 1
-         rank = segment_by_shard[s][0] + idx
-         result = process('get_doc_by_rank', index_desc, s=s, rank=rank, max_disp_len=max_disp_len, query_ids=find_result['token_ids'])
-     if 'error' in result:
-         metadata = result['error']
-         doc = []
-         return metadata, doc
-     metadata = format_doc_metadata(result)
-     doc = result['spans']
-     return metadata, doc
-
- with gr.Blocks() as demo:
-     with gr.Column():
-         gr.HTML(
-             '''<h1 text-align="center">Infini-gram: An Efficient Search Engine over the Massive Pretraining Datasets of Language Models</h1>
-             <p style='font-size: 16px;'>This engine does exact-match search over several open pretraining datasets of language models. Please first select the corpus and the type of query, then enter your query and submit.</p>
-             <p style='font-size: 16px;'>The engine is developed by <a href="https://liujch1998.github.io">Jiacheng Liu</a> and documented in our paper: <a href="https://huggingface.co/papers/2401.17377">Infini-gram: Scaling Unbounded n-gram Language Models to a Trillion Tokens</a>. Feel free to check out our <a href="https://infini-gram.io">Project Homepage</a>.</p>
-             <p style='font-size: 16px;'><b>API Endpoint:</b> If you'd like to issue batch queries to infini-gram, you may invoke our API endpoint. Please refer to the <a href="https://infini-gram.io/api_doc">API documentation</a>.</p>
-             <p style='font-size: 16px;'><b>Note:</b> The query is <b>case-sensitive</b>. Your query will be tokenized with the Llama-2 tokenizer (unless otherwise specified).</p>
-             '''
-         )
-         with gr.Row():
-             with gr.Column(scale=1, min_width=240):
-                 index_desc = gr.Radio(choices=INDEX_DESCS, label='Corpus', value=INDEX_DESCS[0])
-
-             with gr.Column(scale=7):
-                 with gr.Tab('1. Count an n-gram'):
-                     with gr.Column():
-                         gr.HTML('<h2>1. Count an n-gram</h2>')
-                         with gr.Accordion(label='Click to view instructions', open=False):
-                             gr.HTML(f'''<p style="font-size: 16px;">This counts the number of times an n-gram appears in the corpus. If you submit an empty input, it will return the total number of tokens in the corpus. You can also make more complex queries by connecting multiple n-gram terms with the AND/OR operators, in the <a href="https://en.wikipedia.org/wiki/Conjunctive_normal_form">CNF format</a>.</p>
-                             <br>
-                             <p style="font-size: 16px;">Example queries:</p>
-                             <ul style="font-size: 16px;">
-                             <li><b>natural language processing</b> (the output is number of occurrences of "natural language processing")</li>
-                             <li><b>natural language processing AND deep learning</b> (the output is the number of co-occurrences of "natural language processing" and "deep learning")</li>
-                             <li><b>natural language processing OR artificial intelligence AND deep learning OR machine learning</b> (the output is the number of co-occurrences of [one of "natural language processing" / "artificial intelligence"] and [one of "deep learning" / "machine learning"])</li>
-                             </ul>
-                             <br>
-                             <p style="font-size: 16px;">Notes on CNF queries:</p>
-                             <ul style="font-size: 16px;">
-                             <li>A CNF query may contain up to {MAX_CLAUSES_PER_CNF} clauses, and each clause may contain up to {MAX_TERMS_PER_CLAUSE} n-gram terms.</li>
-                             <li>When you write a query in CNF, note that <b>OR has higher precedence than AND</b> (which is contrary to conventions in boolean algebra).</li>
-                             <li>In AND queries, we can only examine co-occurrences where adjacent clauses are separated by no more than {max_diff_tokens} tokens. This value can be adjusted within range [1, {MAX_DIFF_TOKENS}] in "Advanced options".</li>
-                             <li>In AND queries, if a clause has more than {max_clause_freq} matches, we will estimate the count by examining a random subset of {max_clause_freq} occurrences of clause. This value can be adjusted within range [1, {MAX_CLAUSE_FREQ}] in "Advanced options".</li>
-                             <li>The above subsampling mechanism might cause a zero count on co-occurrences of some simple n-grams (e.g., <b>birds AND oil</b>).</li>
-                             </ul>
-                             ''')
-                         with gr.Row():
-                             with gr.Column(scale=1):
-                                 count_query = gr.Textbox(placeholder='Enter a string (an n-gram) here', label='Query', interactive=True)
-                                 with gr.Accordion(label='Advanced options', open=False):
-                                     with gr.Row():
-                                         count_max_clause_freq = gr.Slider(minimum=1, maximum=MAX_CLAUSE_FREQ, value=max_clause_freq, step=1, label='max_clause_freq')
-                                         count_max_diff_tokens = gr.Slider(minimum=1, maximum=MAX_DIFF_TOKENS, value=max_diff_tokens, step=1, label='max_diff_tokens')
-                                 with gr.Row():
-                                     count_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
-                                     count_submit = gr.Button(value='Submit', variant='primary', visible=True)
-                                 count_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
-                                 count_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
-                             with gr.Column(scale=1):
-                                 count_count = gr.Label(label='Count', num_top_classes=0)
-                         count_clear.add([count_query, count_latency, count_tokenized, count_count])
-                         count_submit.click(count, inputs=[index_desc, count_query, count_max_clause_freq, count_max_diff_tokens], outputs=[count_latency, count_tokenized, count_count], api_name=False)
-
-                 with gr.Tab('2. Prob of the last token'):
-                     with gr.Column():
-                         gr.HTML('<h2>2. Compute the probability of the last token in an n-gram</h2>')
-                         with gr.Accordion(label='Click to view instructions', open=False):
-                             gr.HTML(f'''<p style="font-size: 16px;">This computes the n-gram probability of the last token conditioned on the previous tokens (i.e. (n-1)-gram)).</p>
-                             <br>
-                             <p style="font-size: 16px;">Example query: <b>natural language processing</b> (the output is P(processing | natural language), by counting the appearance of the 3-gram "natural language processing" and the 2-gram "natural language", and take the division between the two)</p>
-                             <br>
-                             <p style="font-size: 16px;">Notes:</p>
-                             <ul style="font-size: 16px;">
-                             <li>The (n-1)-gram needs to exist in the corpus. If the (n-1)-gram is not found in the corpus, an error message will appear.</li>
-                             </ul>
-                             ''')
-                         with gr.Row():
-                             with gr.Column(scale=1):
-                                 prob_query = gr.Textbox(placeholder='Enter a string (an n-gram) here', label='Query', interactive=True)
-                                 with gr.Row():
-                                     prob_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
-                                     prob_submit = gr.Button(value='Submit', variant='primary', visible=True)
-                                 prob_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
-                                 prob_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
-                             with gr.Column(scale=1):
-                                 prob_probability = gr.Label(label='Probability', num_top_classes=0)
-                         prob_clear.add([prob_query, prob_latency, prob_tokenized, prob_probability])
-                         prob_submit.click(prob, inputs=[index_desc, prob_query], outputs=[prob_latency, prob_tokenized, prob_probability], api_name=False)
-
-                 with gr.Tab('3. Next-token distribution'):
-                     with gr.Column():
-                         gr.HTML('<h2>3. Compute the next-token distribution of an (n-1)-gram</h2>')
-                         with gr.Accordion(label='Click to view instructions', open=False):
-                             gr.HTML(f'''<p style="font-size: 16px;">This is an extension of the Query Type 2: It interprets your input as the (n-1)-gram and gives you the full next-token distribution.</p>
-                             <br>
-                             <p style="font-size: 16px;">Example query: <b>natural language</b> (the output is P(* | natural language), for the top-10 tokens *)</p>
-                             <br>
-                             <p style="font-size: 16px;">Notes:</p>
-                             <ul style="font-size: 16px;">
-                             <li>The (n-1)-gram needs to exist in the corpus. If the (n-1)-gram is not found in the corpus, an error message will appear.</li>
-                             <li>If the (n-1)-gram appears more than {max_support} times in the corpus, the result will be approximate: we will estimate the distribution by examining a subset of {max_support} occurrences of the (n-1)-gram. This value can be adjusted within range [1, {MAX_SUPPORT}] in "Advanced options".</li>
-                             </ul>
-                             ''')
-
-                         with gr.Row():
-                             with gr.Column(scale=1):
-                                 ntd_query = gr.Textbox(placeholder='Enter a string (an (n-1)-gram) here', label='Query', interactive=True)
-                                 with gr.Accordion(label='Advanced options', open=False):
-                                     ntd_max_support = gr.Slider(minimum=1, maximum=MAX_SUPPORT, value=MAX_SUPPORT, step=1, label='max_support')
-                                 with gr.Row():
-                                     ntd_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
-                                     ntd_submit = gr.Button(value='Submit', variant='primary', visible=True)
-                                 ntd_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
-                                 ntd_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
-                             with gr.Column(scale=1):
-                                 ntd_distribution = gr.Label(label='Distribution', num_top_classes=10)
-                         ntd_clear.add([ntd_query, ntd_latency, ntd_tokenized, ntd_distribution])
-                         ntd_submit.click(ntd, inputs=[index_desc, ntd_query, ntd_max_support], outputs=[ntd_latency, ntd_tokenized, ntd_distribution], api_name=False)
-
-                 with gr.Tab('4. ∞-gram prob'):
-                     with gr.Column():
-                         gr.HTML('<h2>4. Compute the ∞-gram probability of the last token</h2>')
-                         with gr.Accordion(label='Click to view instructions', open=False):
-                             gr.HTML(f'''<p style="font-size: 16px;">This computes the ∞-gram probability of the last token conditioned on the previous tokens. Compared to Query Type 2 (which uses your entire input for n-gram modeling), here we take the longest suffix that we can find in the corpus.</p>
-                             <br>
-                             <p style="font-size: 16px;">Example query: <b>I love natural language processing</b> (if "natural language" appears in the corpus but "love natural language" doesn't, the output is P(processing | natural language); in this case the effective n = 3)</p>
-                             <br>
-                             <p style="font-size: 16px;">Notes:</p>
-                             <ul style="font-size: 16px;">
-                             <li>It may be possible that the effective n = 1, i.e. longest found suffix is empty, in which case it reduces to the uni-gram probability of the last token.</li>
-                             </ul>
-                             ''')
-                         with gr.Row():
-                             with gr.Column(scale=1):
-                                 infgram_prob_query = gr.Textbox(placeholder='Enter a string here', label='Query', interactive=True)
-                                 with gr.Row():
-                                     infgram_prob_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
-                                     infgram_prob_submit = gr.Button(value='Submit', variant='primary', visible=True)
-                                 infgram_prob_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
-                                 infgram_prob_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
-                                 infgram_prob_longest_suffix = gr.Textbox(label='Longest Found Suffix', interactive=False)
-                             with gr.Column(scale=1):
-                                 infgram_prob_probability = gr.Label(label='Probability', num_top_classes=0)
-                         infgram_prob_clear.add([infgram_prob_query, infgram_prob_latency, infgram_prob_tokenized, infgram_prob_longest_suffix, infgram_prob_probability])
-                         infgram_prob_submit.click(infgram_prob, inputs=[index_desc, infgram_prob_query], outputs=[infgram_prob_latency, infgram_prob_tokenized, infgram_prob_longest_suffix, infgram_prob_probability], api_name=False)
-
-                 with gr.Tab('5. ∞-gram next-token distribution'):
-                     with gr.Column():
-                         gr.HTML('<h2>5. Compute the ∞-gram next-token distribution</h2>')
-                         with gr.Accordion(label='Click to view instructions', open=False):
-                             gr.HTML(f'''<p style="font-size: 16px;">This is similar to Query Type 3, but with ∞-gram instead of n-gram.</p>
-                             <br>
-                             <p style="font-size: 16px;">Example query: <b>I love natural language</b> (if "natural language" appears in the corpus but "love natural language" doesn't, the output is P(* | natural language), for the top-10 tokens *)</p>
-                             ''')
-                         with gr.Row():
-                             with gr.Column(scale=1):
-                                 infgram_ntd_query = gr.Textbox(placeholder='Enter a string here', label='Query', interactive=True)
-                                 with gr.Accordion(label='Advanced options', open=False):
-                                     infgram_ntd_max_support = gr.Slider(minimum=1, maximum=MAX_SUPPORT, value=MAX_SUPPORT, step=1, label='max_support')
-                                 with gr.Row():
-                                     infgram_ntd_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
-                                     infgram_ntd_submit = gr.Button(value='Submit', variant='primary', visible=True)
-                                 infgram_ntd_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
-                                 infgram_ntd_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
-                                 infgram_ntd_longest_suffix = gr.Textbox(label='Longest Found Suffix', interactive=False)
-                             with gr.Column(scale=1):
-                                 infgram_ntd_distribution = gr.Label(label='Distribution', num_top_classes=10)
-                         infgram_ntd_clear.add([infgram_ntd_query, infgram_ntd_latency, infgram_ntd_tokenized, infgram_ntd_longest_suffix, infgram_ntd_distribution])
-                         infgram_ntd_submit.click(infgram_ntd, inputs=[index_desc, infgram_ntd_query, infgram_ntd_max_support], outputs=[infgram_ntd_latency, infgram_ntd_tokenized, infgram_ntd_longest_suffix, infgram_ntd_distribution], api_name=False)
-
-                 with gr.Tab('6. Search documents', visible=False):
-                     with gr.Column():
-                         gr.HTML(f'''<h2>6. Search for documents containing n-gram(s)</h2>''')
-                         with gr.Accordion(label='Click to view instructions', open=False):
-                             gr.HTML(f'''<p style="font-size: 16px;">This displays a few random documents in the corpus that satisfies your query. You can simply enter an n-gram, in which case the document displayed would contain your n-gram. You can also connect multiple n-gram terms with the AND/OR operators, in the <a href="https://en.wikipedia.org/wiki/Conjunctive_normal_form">CNF format</a>, in which case the displayed document contains n-grams such that it satisfies this logical constraint.</p>
-                             <br>
-                             <p style="font-size: 16px;">Example queries:</p>
-                             <ul style="font-size: 16px;">
-                             <li><b>natural language processing</b> (the displayed document would contain "natural language processing")</li>
-                             <li><b>natural language processing AND deep learning</b> (the displayed document would contain both "natural language processing" and "deep learning")</li>
-                             <li><b>natural language processing OR artificial intelligence AND deep learning OR machine learning</b> (the displayed document would contain at least one of "natural language processing" / "artificial intelligence", and also at least one of "deep learning" / "machine learning")</li>
-                             </ul>
-                             <br>
-                             <p style="font-size: 16px;">If you want another batch of random documents, simply hit the Submit button again :)</p>
-                             <br>
-                             <p style="font-size: 16px;">Notes on CNF queries:</p>
-                             <ul style="font-size: 16px;">
-                             <li>A CNF query may contain up to {MAX_CLAUSES_PER_CNF} clauses, and each clause may contain up to {MAX_TERMS_PER_CLAUSE} n-gram terms.</li>
-                             <li>When you write a query in CNF, note that <b>OR has higher precedence than AND</b> (which is contrary to conventions in boolean algebra).</li>
-                             <li>In AND queries, we can only examine co-occurrences where adjacent clauses are separated by no more than {max_diff_tokens} tokens. This value can be adjusted within range [1, {MAX_DIFF_TOKENS}] in "Advanced options".</li>
-                             <li>In AND queries, if a clause has more than {max_clause_freq} matches, we will estimate the count by examining a random subset of {max_clause_freq} documents out of all documents containing that clause. This value can be adjusted within range [1, {MAX_CLAUSE_FREQ}] in "Advanced options".</li>
-                             <li>The above subsampling mechanism might cause a zero count on co-occurrences of some simple n-grams (e.g., <b>birds AND oil</b>).</li>
-                             </ul>
-                             <br>
-                             <p style="font-size: 16px;">❗️WARNING: Corpus may contain problematic contents such as PII, toxicity, hate speech, and NSFW text. This tool is merely presenting selected text from the corpus, without any post-hoc safety filtering. It is NOT creating new text. This is a research prototype through which we can expose and examine existing problems with massive text corpora. Please use with caution. Don't be evil :)</p>
-                             ''')
-                         with gr.Row():
-                             with gr.Column(scale=1):
-                                 search_docs_query = gr.Textbox(placeholder='Enter a query here', label='Query', interactive=True)
-                                 search_docs_maxnum = gr.Slider(minimum=1, maximum=MAXNUM, value=maxnum, step=1, label='Number of documents to display')
-                                 search_docs_max_disp_len = gr.Slider(minimum=1, maximum=MAX_DISP_LEN, value=max_disp_len, step=1, label='Number of tokens to display')
-                                 with gr.Accordion(label='Advanced options', open=False):
-                                     with gr.Row():
-                                         search_docs_max_clause_freq = gr.Slider(minimum=1, maximum=MAX_CLAUSE_FREQ, value=max_clause_freq, step=1, label='max_clause_freq')
-                                         search_docs_max_diff_tokens = gr.Slider(minimum=1, maximum=MAX_DIFF_TOKENS, value=max_diff_tokens, step=1, label='max_diff_tokens')
-                                 with gr.Row():
-                                     search_docs_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
-                                     search_docs_submit = gr.Button(value='Submit', variant='primary', visible=True)
-                                 search_docs_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
-                                 search_docs_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
-                             with gr.Column(scale=2):
-                                 search_docs_message = gr.Label(label='Message', num_top_classes=0)
-                                 search_docs_metadatas = []
-                                 search_docs_outputs = []
-                                 for i in range(MAXNUM):
-                                     with gr.Tab(label=str(i+1)):
-                                         search_docs_metadatas.append(gr.Textbox(label='Metadata', lines=3, interactive=False))
-                                         search_docs_outputs.append(gr.HighlightedText(label='Document', show_legend=False, color_map={"-": "red", "0": "green", "1": "cyan", "2": "blue", "3": "magenta"}))
-                         search_docs_clear.add([search_docs_query, search_docs_latency, search_docs_tokenized, search_docs_message] + search_docs_metadatas + search_docs_outputs)
-                         search_docs_submit.click(search_docs, inputs=[index_desc, search_docs_query, search_docs_maxnum, search_docs_max_disp_len, search_docs_max_clause_freq, search_docs_max_diff_tokens], outputs=[search_docs_latency, search_docs_tokenized, search_docs_message] + search_docs_metadatas + search_docs_outputs, api_name=False)
-
-                 with gr.Tab('6. Search documents'):
-                     with gr.Column():
-                         gr.HTML(f'''<h2>6. Search for documents containing n-gram(s)</h2>''')
-                         with gr.Accordion(label='Click to view instructions', open=False):
-                             gr.HTML(f'''<p style="font-size: 16px;">This displays the documents in the corpus that satisfies your query. You can simply enter an n-gram, in which case the document displayed would contain your n-gram. You can also connect multiple n-gram terms with the AND/OR operators, in the <a href="https://en.wikipedia.org/wiki/Conjunctive_normal_form">CNF format</a>, in which case the displayed document contains n-grams such that it satisfies this logical constraint.</p>
-                             <br>
-                             <p style="font-size: 16px;">Example queries:</p>
-                             <ul style="font-size: 16px;">
-                             <li><b>natural language processing</b> (the displayed document would contain "natural language processing")</li>
-                             <li><b>natural language processing AND deep learning</b> (the displayed document would contain both "natural language processing" and "deep learning")</li>
-                             <li><b>natural language processing OR artificial intelligence AND deep learning OR machine learning</b> (the displayed document would contain at least one of "natural language processing" / "artificial intelligence", and also at least one of "deep learning" / "machine learning")</li>
-                             </ul>
-                             <br>
-                             <p style="font-size: 16px;">Notes on CNF queries:</p>
-                             <ul style="font-size: 16px;">
-                             <li>A CNF query may contain up to {MAX_CLAUSES_PER_CNF} clauses, and each clause may contain up to {MAX_TERMS_PER_CLAUSE} n-gram terms.</li>
-                             <li>When you write a query in CNF, note that <b>OR has higher precedence than AND</b> (which is contrary to conventions in boolean algebra).</li>
-                             <li>In AND queries, we can only examine co-occurrences where adjacent clauses are separated by no more than {max_diff_tokens} tokens. This value can be adjusted within range [1, {MAX_DIFF_TOKENS}] in "Advanced options".</li>
-                             <li>In AND queries, if a clause has more than {max_clause_freq} matches, we will estimate the count by examining a random subset of {max_clause_freq} occurrences of that clause. This value can be adjusted within range [1, {MAX_CLAUSE_FREQ}] in "Advanced options".</li>
-                             <li>The above subsampling mechanism might cause a zero count on co-occurrences of some simple n-grams (e.g., <b>birds AND oil</b>).</li>
-                             </ul>
-                             <br>
-                             <p style="font-size: 16px;">❗️WARNING: Corpus may contain problematic contents such as PII, toxicity, hate speech, and NSFW text. This tool is merely presenting selected text from the corpus, without any post-hoc safety filtering. It is NOT creating new text. This is a research prototype through which we can expose and examine existing problems with massive text corpora. Please use with caution. Don't be evil :)</p>
-                             ''')
-                         with gr.Row():
-                             with gr.Column(scale=1):
-                                 search_docs_new_query = gr.Textbox(placeholder='Enter a query here', label='Query', interactive=True)
-                                 search_docs_new_max_disp_len = gr.Slider(minimum=1, maximum=MAX_DISP_LEN, value=max_disp_len, step=1, label='Number of tokens to display')
-                                 with gr.Accordion(label='Advanced options', open=False):
-                                     with gr.Row():
-                                         search_docs_new_max_clause_freq = gr.Slider(minimum=1, maximum=MAX_CLAUSE_FREQ, value=max_clause_freq, step=1, label='max_clause_freq')
-                                         search_docs_new_max_diff_tokens = gr.Slider(minimum=1, maximum=MAX_DIFF_TOKENS, value=max_diff_tokens, step=1, label='max_diff_tokens')
-                                 with gr.Row():
-                                     search_docs_new_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
-                                     search_docs_new_submit = gr.Button(value='Submit', variant='primary', visible=True)
-                                 search_docs_new_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
-                                 search_docs_new_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
-                             with gr.Column(scale=2):
-                                 search_docs_new_message = gr.Label(label='Message', num_top_classes=0)
-                                 search_docs_new_idx = gr.Slider(label='', minimum=0, maximum=0, step=1, value=0, interactive=False)
-                                 search_docs_new_metadata = gr.Textbox(label='Metadata', lines=3, max_lines=3, interactive=False)
-                                 search_docs_new_output = gr.HighlightedText(label='Document', show_legend=False, color_map={"-": "red", "0": "green", "1": "cyan", "2": "blue", "3": "magenta"})
-                         search_docs_state = gr.State(value=None)
-                         search_docs_new_clear.add([search_docs_new_query, search_docs_new_latency, search_docs_new_tokenized, search_docs_new_message, search_docs_new_idx, search_docs_new_metadata, search_docs_new_output])
-                         search_docs_new_clear.click(
-                             clear_search_docs_new,
-                             inputs=[search_docs_state],
-                             outputs=[search_docs_new_idx, search_docs_state]
-                         )
-                         search_docs_new_submit.click(
-                             search_docs_new,
-                             inputs=[index_desc, search_docs_new_query, search_docs_new_max_disp_len,
-                                     search_docs_new_max_clause_freq, search_docs_new_max_diff_tokens,
-                                     search_docs_state],
-                             outputs=[search_docs_new_latency, search_docs_new_tokenized,
-                                      search_docs_new_message, search_docs_new_idx,
-                                      search_docs_new_metadata, search_docs_new_output,
-                                      search_docs_state]
-                         )
-                         search_docs_new_idx.input(
-                             get_another_doc,
-                             inputs=[index_desc, search_docs_new_idx, search_docs_new_max_disp_len,
-                                     search_docs_state],
-                             outputs=[search_docs_new_metadata, search_docs_new_output]
-                         )
[old lines 506-528 are not rendered in the diff view apart from blank lines and a truncated trailing fragment ("}", "```", "''')")]
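The header of the old file is collapsed above, so the definition of `process()` never appears in the diff. Below is a minimal sketch of what it plausibly looked like, inferred from its visible body and from call sites such as `process('count', index_desc, query=...)`; the payload field names and the envvar handling are assumptions, not the author's code:

```python
import json
import os
import requests

API_URL = os.environ.get('API_URL')          # assumed: the visible body checks it against None
DEBUG = os.environ.get('DEBUG', '0') == '1'  # assumed flag; only its truthiness is used below

def process(query_type, index_desc, **kwargs):
    # Hypothetical payload; the real dict is hidden in the collapsed region,
    # only its closing brace and the lines after it are rendered in the diff.
    data = {
        'query_type': query_type,
        'index': index_desc,
    }
    data.update(kwargs)
    print(json.dumps(data))
    if API_URL is None:
        raise ValueError('API_URL envvar is not set!')
    try:
        response = requests.post(API_URL, json=data, timeout=10)
    except requests.exceptions.Timeout:
        raise ValueError('Web request timed out. Please try again later.')
    except requests.exceptions.RequestException as e:
        raise ValueError(f'Web request error: {e}')
    if response.status_code == 200:
        result = response.json()
    else:
        raise ValueError(f'HTTP error {response.status_code}: {response.json()}')
    if DEBUG:
        print(result)
    return result
```

Everything from `data.update(kwargs)` down is taken verbatim from the removed lines; only the signature, the dict contents, and the two module-level assignments are reconstructed.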
+ import spaces
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+ import torch
+ from threading import Thread
+
+ phi4_model_path = "microsoft/Phi-4-reasoning-plus"
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+ phi4_model = AutoModelForCausalLM.from_pretrained(phi4_model_path, device_map="auto", torch_dtype="auto")
+ phi4_tokenizer = AutoTokenizer.from_pretrained(phi4_model_path)
+
+ @spaces.GPU(duration=60)
+ def generate_response(user_message, max_tokens, temperature, top_k, top_p, repetition_penalty, history_state):
+     if not user_message.strip():
+         # This function is a generator, so yield (a bare `return value` would be swallowed by StopIteration).
+         yield history_state, history_state
+         return
+
+     # Phi-4 model settings
+     model = phi4_model
+     tokenizer = phi4_tokenizer
+     start_tag = "<|im_start|>"
+     sep_tag = "<|im_sep|>"
+     end_tag = "<|im_end|>"
+
+     # Recommended prompt settings by Microsoft
+     system_message = "Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: <think> {Thought section} </think> {Solution section}. In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines:"
+     prompt = f"{start_tag}system{sep_tag}{system_message}{end_tag}"
+     for message in history_state:
+         if message["role"] == "user":
+             prompt += f"{start_tag}user{sep_tag}{message['content']}{end_tag}"
+         elif message["role"] == "assistant" and message["content"]:
+             prompt += f"{start_tag}assistant{sep_tag}{message['content']}{end_tag}"
+     prompt += f"{start_tag}user{sep_tag}{user_message}{end_tag}{start_tag}assistant{sep_tag}"
+
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
+     do_sample = not (temperature == 1.0 and top_k >= 100 and top_p == 1.0)
+
+     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
+
+     # sampling techniques
+     generation_kwargs = {
+         "input_ids": inputs["input_ids"],
+         "attention_mask": inputs["attention_mask"],
+         "max_new_tokens": int(max_tokens),
+         "do_sample": do_sample,              # use the flag computed above instead of a hardcoded True
+         "temperature": float(temperature),   # honor the Temperature slider instead of a hardcoded 0.8
+         "top_k": int(top_k),
+         "top_p": float(top_p),               # honor the Top-p slider instead of a hardcoded 0.95
+         "repetition_penalty": repetition_penalty,
+         "streamer": streamer,
+     }
+
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     # Stream the response
+     assistant_response = ""
+     new_history = history_state + [
+         {"role": "user", "content": user_message},
+         {"role": "assistant", "content": ""}
+     ]
+     for new_token in streamer:
+         cleaned_token = new_token.replace("<|im_start|>", "").replace("<|im_sep|>", "").replace("<|im_end|>", "")
+         assistant_response += cleaned_token
+         new_history[-1]["content"] = assistant_response.strip()
+         yield new_history, new_history
+
+     yield new_history, new_history
+
+ example_messages = {
+     "Math reasoning": "If a rectangular prism has a length of 6 cm, a width of 4 cm, and a height of 5 cm, what is the length of the longest line segment that can be drawn from one vertex to another?",
+     "Logic puzzle": "Four people (Alex, Blake, Casey, and Dana) each have a different favorite color (red, blue, green, yellow) and a different favorite fruit (apple, banana, cherry, date). Given the following clues: 1) The person who likes red doesn't like dates. 2) Alex likes yellow. 3) The person who likes blue likes cherries. 4) Blake doesn't like apples or bananas. 5) Casey doesn't like yellow or green. Who likes what color and what fruit?",
+     "Physics problem": "A ball is thrown upward with an initial velocity of 15 m/s from a height of 2 meters above the ground. Assuming the acceleration due to gravity is 9.8 m/s², determine: 1) The maximum height the ball reaches. 2) The total time the ball is in the air before hitting the ground. 3) The velocity with which the ball hits the ground."
+ }
+
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     gr.Markdown(
+         """
+         # Phi-4-reasoning-plus Chatbot
+         Welcome to the Phi-4-reasoning-plus Chatbot! This model excels at multi-step reasoning tasks in mathematics, logic, and science.
+
+         The model will provide responses with two sections:
+         1. **<think>**: A detailed step-by-step reasoning process showing its work
+         2. **Solution**: A concise, accurate final answer based on the reasoning
+
+         Try the example problems below to see how the model breaks down complex reasoning problems.
+         """
+     )
+
+     history_state = gr.State([])
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### Settings")
+             max_tokens_slider = gr.Slider(
+                 minimum=64,
+                 maximum=32768,
+                 step=1024,
+                 value=4096,
+                 label="Max Tokens"
+             )
+             with gr.Accordion("Advanced Settings", open=False):
+                 temperature_slider = gr.Slider(
+                     minimum=0.1,
+                     maximum=2.0,
+                     value=0.8,
+                     label="Temperature"
+                 )
+                 top_k_slider = gr.Slider(
+                     minimum=1,
+                     maximum=100,
+                     step=1,
+                     value=50,
+                     label="Top-k"
+                 )
+                 top_p_slider = gr.Slider(
+                     minimum=0.1,
+                     maximum=1.0,
+                     value=0.95,
+                     label="Top-p"
+                 )
+                 repetition_penalty_slider = gr.Slider(
+                     minimum=1.0,
+                     maximum=2.0,
+                     value=1.0,
+                     label="Repetition Penalty"
+                 )
+
+         with gr.Column(scale=4):
+             chatbot = gr.Chatbot(label="Chat", type="messages")
+             with gr.Row():
+                 user_input = gr.Textbox(
+                     label="Your message",
+                     placeholder="Type your message here...",
+                     scale=3
+                 )
+                 submit_button = gr.Button("Send", variant="primary", scale=1)
+                 clear_button = gr.Button("Clear", scale=1)
+             gr.Markdown("**Try these examples:**")
+             with gr.Row():
+                 example1_button = gr.Button("Math reasoning")
+                 example2_button = gr.Button("Logic puzzle")
+                 example3_button = gr.Button("Physics problem")
+
+     submit_button.click(
+         fn=generate_response,
+         inputs=[user_input, max_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repetition_penalty_slider, history_state],
+         outputs=[chatbot, history_state]
+     ).then(
+         fn=lambda: gr.update(value=""),
+         inputs=None,
+         outputs=user_input
+     )
+
+     clear_button.click(
+         fn=lambda: ([], []),
+         inputs=None,
+         outputs=[chatbot, history_state]
+     )
+
+     example1_button.click(
+         fn=lambda: gr.update(value=example_messages["Math reasoning"]),
+         inputs=None,
+         outputs=user_input
+     )
+     example2_button.click(
+         fn=lambda: gr.update(value=example_messages["Logic puzzle"]),
+         inputs=None,
+         outputs=user_input
+     )
+     example3_button.click(
+         fn=lambda: gr.update(value=example_messages["Physics problem"]),
+         inputs=None,
+         outputs=user_input
+     )
+
+ demo.launch(ssr_mode=False)
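For reference, the chat prompt that the new `generate_response` assembles can be inspected without loading the model. The tags and the assembly loop below are taken straight from the added code; the sample turns and the shortened system message are illustrative placeholders:

```python
# Rebuild the Phi-4 prompt exactly the way generate_response does (no GPU needed).
start_tag, sep_tag, end_tag = "<|im_start|>", "<|im_sep|>", "<|im_end|>"
system_message = "..."  # stand-in for the long Microsoft-recommended system prompt above

history_state = [
    {"role": "user", "content": "What is 2 + 2?"},             # illustrative turn
    {"role": "assistant", "content": "<think>...</think> 4"},  # illustrative turn
]
user_message = "And 3 * 7?"

prompt = f"{start_tag}system{sep_tag}{system_message}{end_tag}"
for message in history_state:
    if message["role"] == "user":
        prompt += f"{start_tag}user{sep_tag}{message['content']}{end_tag}"
    elif message["role"] == "assistant" and message["content"]:
        prompt += f"{start_tag}assistant{sep_tag}{message['content']}{end_tag}"
prompt += f"{start_tag}user{sep_tag}{user_message}{end_tag}{start_tag}assistant{sep_tag}"

print(prompt)  # ends with an open assistant turn for the model to complete
```

This makes the streaming design visible: generation runs on a background `Thread`, and the UI loop drains `TextIteratorStreamer`, stripping the three special tags from each token before appending it to the last assistant message.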