Unfaithful committed on
Commit 86e32e6 · verified · 1 Parent(s): ce8862e

Update app.py

Files changed (1)
app.py +433 -52
app.py CHANGED
@@ -1,63 +1,444 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
 
+import os
+import time
+import copy
+import urllib.request
+import requests
+import random
+from threading import Thread
+from typing import List, Dict, Union
+import subprocess
+import torch
+import gradio as gr
+from bs4 import BeautifulSoup
+import datasets
+from transformers import TextIteratorStreamer
+from transformers import Idefics2ForConditionalGeneration
+from transformers import AutoProcessor
+from huggingface_hub import InferenceClient
+from PIL import Image
+import io
+from functools import lru_cache
+from duckduckgo_search import DDGS
+import spaces  # needed for the @spaces.GPU decorator on model_inference
+
+# Install flash attention, skipping the CUDA build if necessary
+subprocess.run(
+    "pip install flash-attn --no-build-isolation",
+    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+    shell=True,
+)
+
+# Use CUDA if available, otherwise fall back to CPU
+DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Load the pre-trained model for image-based chat
+MODELS = {
+    "idefics2-8b-chatty": Idefics2ForConditionalGeneration.from_pretrained(
+        "HuggingFaceM4/idefics2-8b-chatty",
+        torch_dtype=torch.float16,
+        _attn_implementation="flash_attention_2",
+    ).to(DEVICE),
+}
+
+# Load the pre-trained processor for image-based chat
+PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
+
+# System prompt for the image-based chat model
+SYSTEM_PROMPT = [
+    {
+        "role": "system",
+        "content": [
+            {
+                "type": "text",
+                "text": """I am OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Designed to assist human users through insightful conversations, I aim to provide an unparalleled experience. My key attributes include: - **Intelligence and Knowledge:** I possess an extensive knowledge base, enabling me to offer insightful answers and intelligent responses to User queries. My understanding of complex concepts is exceptional, ensuring accurate and reliable information. - **Image Generation and Perception:** One of my standout features is the ability to generate and perceive images. Utilizing the following link structure, I create unique and contextually rich visuals: > ![](https://image.pollinations.ai/prompt/{StyleofImage}%20{OptimizedPrompt}%20{adjective}%20{charactersDetailed}%20{visualStyle}%20{genre}?width={width}&height={height}&nologo=poll&nofeed=yes&seed={random}) For image generation, I replace the {info inside curly braces} with specific details according to the requirements to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. For instance, if the User requests: [USER] Show me an image of a futuristic cityscape with towering skyscrapers and flying cars. [OpenGPT 4o] Generating the image you requested: ![](https://image.pollinations.ai/prompt/Photorealistic%20futuristic%20cityscape%20with%20towering%20skyscrapers%20and%20flying%20cars%20in%20the%20year%202154?width=1024&height=768&nologo=poll&nofeed=yes&seed=85172) **Bulk Image Generation with Links:** I excel at generating multiple image links simultaneously, always providing unique links and visuals, and I ensure that each image is distinct and captivates the User. Note: always provide image links starting with !, as in the examples above. My ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations. I am constantly evolving to remain a reliable and trusted companion to the User. I am also an expert in every field and draw on the context of previous questions when answering follow-ups."""
+            },
+        ],
+    },
+    {
+        "role": "assistant",
+        "content": [
+            {
+                "type": "text",
+                "text": "Hello, I'm OpenGPT 4o, made by KingNish. How can I help you? I can chat with you, generate images, classify images, and even do all of this in bulk.",
+            },
+        ],
+    },
+]
+
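+# For illustration (not used by the code itself), a link following the scheme the
+# system prompt describes, with the curly-brace fields filled in and spaces
+# percent-encoded, looks like:
+# ![](https://image.pollinations.ai/prompt/Watercolor%20mountain%20lake%20at%20dawn?width=1024&height=768&nologo=poll&nofeed=yes&seed=4217)
+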
+# Chatbot utility functions
+
+# Check whether a turn in the chat history contains only media
+def turn_is_pure_media(turn):
+    return turn[1] is None
+
+# Load an image from a URL
+def load_image_from_url(url):
+    with urllib.request.urlopen(url) as response:
+        image_data = response.read()
+        image_stream = io.BytesIO(image_data)
+        image = Image.open(image_stream)
+        return image
+
+# Convert an image file to JPEG bytes
+def img_to_bytes(image_path):
+    image = Image.open(image_path).convert(mode="RGB")
+    buffer = io.BytesIO()
+    image.save(buffer, format="JPEG")
+    img_bytes = buffer.getvalue()
+    image.close()
+    return img_bytes
+
+# Format the user prompt with the image history and system conditioning
+def format_user_prompt_with_im_history_and_system_conditioning(
+    user_prompt, chat_history
+) -> List[Dict[str, Union[List, str]]]:
+    resulting_messages = copy.deepcopy(SYSTEM_PROMPT)
+    resulting_images = []
+    for resulting_message in resulting_messages:
+        if resulting_message["role"] == "user":
+            for content in resulting_message["content"]:
+                if content["type"] == "image":
+                    resulting_images.append(load_image_from_url(content["image"]))
+    for turn in chat_history:
+        if not resulting_messages or (
+            resulting_messages and resulting_messages[-1]["role"] != "user"
+        ):
+            resulting_messages.append(
+                {
+                    "role": "user",
+                    "content": [],
+                }
+            )
+        if turn_is_pure_media(turn):
+            media = turn[0][0]
+            resulting_messages[-1]["content"].append({"type": "image"})
+            resulting_images.append(Image.open(media))
+        else:
+            user_utterance, assistant_utterance = turn
+            resulting_messages[-1]["content"].append(
+                {"type": "text", "text": user_utterance.strip()}
+            )
+            resulting_messages.append(
+                {
+                    "role": "assistant",
+                    "content": [{"type": "text", "text": assistant_utterance.strip()}],
+                }
+            )
+    if not user_prompt["files"]:
+        resulting_messages.append(
+            {
+                "role": "user",
+                "content": [{"type": "text", "text": user_prompt["text"]}],
+            }
+        )
+    else:
+        resulting_messages.append(
+            {
+                "role": "user",
+                "content": [{"type": "image"}] * len(user_prompt["files"])
+                + [{"type": "text", "text": user_prompt["text"]}],
+            }
+        )
+        resulting_images.extend([Image.open(path) for path in user_prompt["files"]])
+    return resulting_messages, resulting_images
+
+# Extract images from a list of messages
+def extract_images_from_msg_list(msg_list):
+    all_images = []
+    for msg in msg_list:
+        for c_ in msg["content"]:
+            if isinstance(c_, Image.Image):
+                all_images.append(c_)
+    return all_images
+
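+# Example (illustrative): for msg_list = [{"content": [img_a, "caption"]},
+# {"content": [img_b]}], where img_a and img_b are PIL.Image.Image objects,
+# extract_images_from_msg_list(msg_list) returns [img_a, img_b].
+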
+# DuckDuckGo search helpers
+_useragent_list = [
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
+    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0',
+]
+
+def get_useragent():
+    return random.choice(_useragent_list)
+
+@lru_cache(maxsize=128)
+def extract_text_from_webpage(html_content):
+    soup = BeautifulSoup(html_content, "html.parser")
+    for tag in soup(["script", "style", "header", "footer", "nav"]):
+        tag.extract()
+    return soup.get_text(strip=True)
+
+def fetch_and_extract(link, max_chars_per_page):
+    try:
+        webpage = requests.get(link, headers={"User-Agent": get_useragent()})
+        webpage.raise_for_status()
+        visible_text = extract_text_from_webpage(webpage.text)
+        if len(visible_text) > max_chars_per_page:
+            visible_text = visible_text[:max_chars_per_page] + "..."
+        return {"link": link, "text": visible_text}
+    except requests.exceptions.RequestException:
+        return {"link": link, "text": None}
+
+def search(term, max_results=2, max_chars_per_page=8000, max_threads=10):
+    all_results = []
+    result_block = DDGS().text(term, max_results=max_results)
+    threads = []
+    for result in result_block:
+        if 'href' in result:
+            link = result["href"]
+            # Bind link via a default argument so each thread fetches its own URL
+            thread = Thread(target=lambda link=link: all_results.append(fetch_and_extract(link, max_chars_per_page)))
+            threads.append(thread)
+            thread.start()
+    for thread in threads:
+        thread.join()
+    return all_results
+
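+# Illustrative use (requires network access): search("gradio streaming chatbot")
+# returns a list like [{"link": "https://...", "text": "visible page text..."}, ...],
+# with each page's text truncated to max_chars_per_page characters.
+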
+# Format the prompt for the text-only language model
+def format_prompt(user_prompt, chat_history):
+    prompt = "<s>"
+    for item in chat_history:
+        if isinstance(item, tuple):
+            prompt += f"[INST] {item[0]} [/INST]"
+            prompt += f" {item[1]}</s> "
+        else:
+            prompt += " [Image] "
+    prompt += f"[INST] {user_prompt} [/INST]"
+    return prompt
+
+chat_history = []
+history = ""
+
+def update_history(answer="", question=""):
+    global chat_history
+    global history
+    history += f"([ USER: {question}, OpenGPT 4o: {answer} ]),"
+    chat_history.append((question, answer))
+    return history
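+
+# Example: format_prompt("Hi", [("Hello", "Hey there!")]) returns
+# "<s>[INST] Hello [/INST] Hey there!</s> [INST] Hi [/INST]"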
+
+# Define the model inference function
+@spaces.GPU(duration=45, queue=False)
+def model_inference(
+    user_prompt,
+    chat_history,
+    model_selector,
+    decoding_strategy,
+    temperature,
+    max_new_tokens,
+    repetition_penalty,
+    top_p,
+    web_search,
+):
+    generation_args = {}
+
+    if not user_prompt["files"]:
+        if web_search is True:
+            web_results = search(user_prompt["text"])
+            web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
+            client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
+            generate_kwargs = dict(
+                max_new_tokens=4000,
+                do_sample=True,
+            )
+            formatted_prompt = format_prompt(
+                f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. You are provided with WEB results from which you can find the information needed to answer. Do not say unnecessary things; only say what is important and relevant. You also have the ability to generate images, but only do so when requested, by utilizing the following link structure: ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, replace the info inside brackets with specific details according to the requirements to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. Do not create images unnecessarily or show off your image generation capability unless requested. 2. Always give image links in the format ![](url), and do not generate an image until requested. 3. If the user requests bulk image generation, create that number of links with the provided context. 4. Always learn from the previous conversation. 5. Always try to connect the conversation with the history. 6. Do not make the conversation too long. 7. Do not tell the user about your image generation capability; learn from previous responses. Your ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations, and to remain a reliable and trusted companion to the user. You are also an expert in every field and draw on the context of previous questions when answering. Make sure not to generate an image until requested. [USER] {user_prompt} [WEB] {web2} [OpenGPT 4o]""",
+                chat_history)
+            stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
+                                            return_full_text=False)
+            output = ""
+            for response in stream:
+                if response.token.text != "</s>":
+                    output += response.token.text
+                    yield output
+        else:
+            client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+            generate_kwargs = dict(
+                max_new_tokens=5000,
+                do_sample=True,
+            )
+            formatted_prompt = format_prompt(
+                f"""You are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Do not say unnecessary things; only say what is important and relevant. You also have the ability to generate images, but only do so when requested, by utilizing the following link structure: ![](https://image.pollinations.ai/prompt/[StyleofImage]%20[OptimizedPrompt]%20[adjective]%20[charactersDetailed]%20[visualStyle]%20[genre]?width=[width]&height=[height]&nologo=poll&nofeed=yes&seed=[random]) For image generation, replace the info inside brackets with specific details according to the requirements to create relevant visuals. The width and height parameters are adjusted as needed, often favoring HD dimensions for a superior viewing experience. Note: 1. Do not create images unnecessarily or show off your image generation capability unless requested. 2. Always give image links in the format ![](url). 3. If the user requests bulk image generation, create that number of links with the provided context. 4. Always learn from the previous conversation. 5. Always try to connect the conversation with the history. 6. Do not make the conversation too long. 7. Do not tell the user about your image generation capability; learn from previous responses. Your ultimate goal is to offer a seamless and enjoyable experience, providing assistance that exceeds expectations, and to remain a reliable and trusted companion to the user. You are also an expert in every field and draw on the context of previous questions when answering. {history}. [USER] {user_prompt} [OpenGPT 4o]""",
+                chat_history)
+            stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
+                                            return_full_text=False)
+            output = ""
+            for response in stream:
+                if response.token.text != "</s>":
+                    output += response.token.text
+                    yield output
+            update_history(output, user_prompt)
+            return
+    else:
+        if user_prompt["text"].strip() == "" and not user_prompt["files"]:
+            raise gr.Error("Please input a query and optionally image(s).")
+
+        if user_prompt["text"].strip() == "" and user_prompt["files"]:
+            raise gr.Error("Please input a text query along with the image(s).")
+
+        streamer = TextIteratorStreamer(
+            PROCESSOR.tokenizer,
+            skip_prompt=True,
+            timeout=120.0,
+        )
+        generation_args = {
+            "max_new_tokens": max_new_tokens,
+            "repetition_penalty": repetition_penalty,
+            "streamer": streamer,
+        }
+        assert decoding_strategy in [
+            "Greedy",
+            "Top P Sampling",
+        ]
+
+        if decoding_strategy == "Greedy":
+            generation_args["do_sample"] = False
+        elif decoding_strategy == "Top P Sampling":
+            generation_args["temperature"] = temperature
+            generation_args["do_sample"] = True
+            generation_args["top_p"] = top_p
+        resulting_text, resulting_images = format_user_prompt_with_im_history_and_system_conditioning(
+            user_prompt=user_prompt,
+            chat_history=chat_history,
+        )
+        prompt = PROCESSOR.apply_chat_template(resulting_text, add_generation_prompt=True)
+        inputs = PROCESSOR(
+            text=prompt,
+            images=resulting_images if resulting_images else None,
+            return_tensors="pt",
+        )
+        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
+        generation_args.update(inputs)
+        # Generate in a background thread; the streamer yields tokens as they arrive
+        thread = Thread(
+            target=MODELS[model_selector].generate,
+            kwargs=generation_args,
+        )
+        thread.start()
+        acc_text = ""
+        for text_token in streamer:
+            time.sleep(0.01)
+            acc_text += text_token
+            if acc_text.endswith("<end_of_utterance>"):
+                acc_text = acc_text[: -len("<end_of_utterance>")]
+            yield acc_text
+        update_history(acc_text, user_prompt)
+        return
+
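+# Design note: for the image path above, generation runs on a background Thread
+# while TextIteratorStreamer yields decoded tokens on the caller's thread, so the
+# UI can display partial output as it is produced.
+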
+# Dataset features for logging conversations
+FEATURES = datasets.Features(
+    {
+        "model_selector": datasets.Value("string"),
+        "images": datasets.Sequence(datasets.Image(decode=True)),
+        "conversation": datasets.Sequence({"User": datasets.Value("string"), "Assistant": datasets.Value("string")}),
+        "decoding_strategy": datasets.Value("string"),
+        "temperature": datasets.Value("float32"),
+        "max_new_tokens": datasets.Value("int32"),
+        "repetition_penalty": datasets.Value("float32"),
+        "top_p": datasets.Value("float32"),  # top_p is a float in (0, 1), not an int
+    }
+)
+
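+# A record matching FEATURES would look like (illustrative values; Sequence of a
+# dict stores columns as lists):
+# {"model_selector": "idefics2-8b-chatty", "images": [...],
+#  "conversation": {"User": [...], "Assistant": [...]},
+#  "decoding_strategy": "Top P Sampling", "temperature": 0.5,
+#  "max_new_tokens": 2048, "repetition_penalty": 1.0, "top_p": 0.9}
+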
+# Hyper-parameter controls for generation
+max_new_tokens = gr.Slider(
+    minimum=2048,
+    maximum=16000,
+    value=2048,
+    step=64,
+    interactive=True,
+    label="Maximum number of new tokens to generate",
+)
+repetition_penalty = gr.Slider(
+    minimum=0.01,
+    maximum=5.0,
+    value=1,
+    step=0.01,
+    interactive=True,
+    label="Repetition penalty",
+    info="1.0 is equivalent to no penalty",
+)
+decoding_strategy = gr.Radio(
+    [
+        "Greedy",
+        "Top P Sampling",
+    ],
+    value="Top P Sampling",
+    label="Decoding strategy",
+    interactive=True,
+    info="Greedy always picks the most likely next token; Top P samples from the smallest set of tokens whose cumulative probability exceeds top_p.",
+)
+temperature = gr.Slider(
+    minimum=0.0,
+    maximum=2.0,
+    value=0.5,
+    step=0.05,
+    visible=True,
+    interactive=True,
+    label="Sampling temperature",
+    info="Higher values will produce more diverse outputs.",
+)
+top_p = gr.Slider(
+    minimum=0.01,
+    maximum=0.99,
+    value=0.9,
+    step=0.01,
+    visible=True,
+    interactive=True,
+    label="Top P",
+    info="Higher values are equivalent to sampling more low-probability tokens.",
+)
+
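+# Greedy decoding always takes the highest-probability token; Top P (nucleus)
+# sampling draws from the smallest token set whose cumulative probability exceeds
+# top_p, after scaling the logits by the temperature.
+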
+# Create the chatbot interface
+# NOTE: BOT_AVATAR is referenced but never defined in this file; it must be set
+# (e.g. to an avatar image path or URL) before this line runs.
+chatbot = gr.Chatbot(
+    label="OpenGPT-4o-Chatty",
+    avatar_images=[None, BOT_AVATAR],
+    show_copy_button=True,
+    likeable=True,
+    layout="panel",
+)
+output = gr.Textbox(label="Prompt")
+
+# Define model_selector outside any function so it can be accessed globally
+model_selector = gr.Dropdown(
+    choices=list(MODELS.keys()),
+    value=list(MODELS.keys())[0],
+    interactive=True,
+    label="Model",
+    visible=False,
+)
+
+def main():
+    with gr.Blocks() as demo:
+        gr.Markdown("# OpenGPT-4o Chatbot")
+        chatbot.render()
+        with gr.Row():
+            with gr.Column():
+                model_selector.render()
+                decoding_strategy.render()
+                max_new_tokens.render()
+                repetition_penalty.render()
+                temperature.render()
+                top_p.render()
+            with gr.Column():
+                output.render()
+
+        def respond(user_prompt, web_search=False):
+            # model_inference expects a dict with "text" and "files" keys
+            yield from model_inference(
+                user_prompt={"text": user_prompt, "files": []},
+                chat_history=chat_history,
+                model_selector=model_selector.value,
+                decoding_strategy=decoding_strategy.value,
+                temperature=temperature.value,
+                max_new_tokens=max_new_tokens.value,
+                repetition_penalty=repetition_penalty.value,
+                top_p=top_p.value,
+                web_search=web_search,
+            )
+
+        # Minimal wiring (assumption): gr.Chatbot has no .input attribute, so
+        # submit the prompt textbox and route the streamed reply to the chatbot.
+        output.submit(respond, inputs=output, outputs=chatbot)
+
+    demo.launch()
+
+if __name__ == "__main__":
+    main()