Nymbo committed on
Commit
69de3d2
·
verified ·
1 Parent(s): 878aff7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -46
app.py CHANGED
@@ -23,7 +23,15 @@ def respond(
23
  seed,
24
  custom_model
25
  ):
 
 
26
 
 
 
 
 
 
 
27
  print(f"Received message: {message}")
28
  print(f"History: {history}")
29
  print(f"System message: {system_message}")
@@ -35,6 +43,7 @@ def respond(
35
  if seed == -1:
36
  seed = None
37
 
 
38
  messages = [{"role": "system", "content": system_message}]
39
  print("Initial messages array constructed.")
40
 
@@ -61,6 +70,7 @@ def respond(
61
  response = ""
62
  print("Sending request to OpenAI API.")
63
 
 
64
  for message_chunk in client.chat.completions.create(
65
  model=model_to_use,
66
  max_tokens=max_tokens,
@@ -78,13 +88,29 @@ def respond(
78
 
79
  print("Completed response generation.")
80
 
81
- # GRADIO UI
82
 
83
- chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Select a model and begin chatting", likeable=True, layout="panel")
 
 
 
 
 
 
 
 
 
 
 
84
  print("Chatbot interface created.")
85
 
86
- system_message_box = gr.Textbox(value="", placeholder="You are a helpful assistant.", label="System Prompt")
 
 
 
 
 
87
 
 
88
  max_tokens_slider = gr.Slider(
89
  minimum=1,
90
  maximum=4096,
@@ -121,7 +147,9 @@ seed_slider = gr.Slider(
121
  label="Seed (-1 for random)"
122
  )
123
 
124
- # The custom_model_box is what the respond function sees as "custom_model"
 
 
125
  custom_model_box = gr.Textbox(
126
  value="",
127
  label="Custom Model",
@@ -129,14 +157,7 @@ custom_model_box = gr.Textbox(
129
  placeholder="meta-llama/Llama-3.3-70B-Instruct"
130
  )
131
 
132
- def set_custom_model_from_radio(selected):
133
- """
134
- This function will get triggered whenever someone picks a model from the 'Featured Models' radio.
135
- We will update the Custom Model text box with that selection automatically.
136
- """
137
- print(f"Featured model selected: {selected}")
138
- return selected
139
-
140
  demo = gr.ChatInterface(
141
  fn=respond,
142
  additional_inputs=[
@@ -146,7 +167,7 @@ demo = gr.ChatInterface(
146
  top_p_slider,
147
  frequency_penalty_slider,
148
  seed_slider,
149
- custom_model_box,
150
  ],
151
  fill_height=True,
152
  chatbot=chatbot,
@@ -154,36 +175,67 @@ demo = gr.ChatInterface(
154
  )
155
  print("ChatInterface object created.")
156
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  with demo:
 
158
  with gr.Accordion("Model Selection", open=False):
159
- model_search_box = gr.Textbox(
160
- label="Filter Models",
161
- placeholder="Search for a featured model...",
162
- lines=1
163
- )
164
- print("Model search box created.")
165
-
166
- models_list = [
167
- "meta-llama/Llama-3.3-70B-Instruct",
168
- "meta-llama/Llama-3.2-3B-Instruct",
169
- "meta-llama/Llama-3.2-1B-Instruct",
170
- "meta-llama/Llama-3.1-8B-Instruct",
171
- "NousResearch/Hermes-3-Llama-3.1-8B",
172
- "google/gemma-2-27b-it",
173
- "google/gemma-2-9b-it",
174
- "google/gemma-2-2b-it",
175
- "mistralai/Mistral-Nemo-Instruct-2407",
176
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
177
- "mistralai/Mistral-7B-Instruct-v0.3",
178
- "Qwen/Qwen2.5-72B-Instruct",
179
- "Qwen/QwQ-32B-Preview",
180
- "PowerInfer/SmallThinker-3B-Preview",
181
- "HuggingFaceTB/SmolLM2-1.7B-Instruct",
182
- "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
183
- "microsoft/Phi-3.5-mini-instruct",
184
- ]
185
- print("Models list initialized.")
186
 
 
187
  featured_model_radio = gr.Radio(
188
  label="Select a model below",
189
  choices=models_list,
@@ -192,12 +244,7 @@ with demo:
192
  )
193
  print("Featured models radio button created.")
194
 
195
- def filter_models(search_term):
196
- print(f"Filtering models with search term: {search_term}")
197
- filtered = [m for m in models_list if search_term.lower() in m.lower()]
198
- print(f"Filtered models: {filtered}")
199
- return gr.update(choices=filtered)
200
-
201
  model_search_box.change(
202
  fn=filter_models,
203
  inputs=model_search_box,
@@ -205,6 +252,7 @@ with demo:
205
  )
206
  print("Model search box change event linked.")
207
 
 
208
  featured_model_radio.change(
209
  fn=set_custom_model_from_radio,
210
  inputs=featured_model_radio,
@@ -214,6 +262,10 @@ with demo:
214
 
215
  print("Gradio interface initialized.")
216
 
 
 
 
 
217
  if __name__ == "__main__":
218
  print("Launching the demo application.")
219
  demo.launch()
 
23
  seed,
24
  custom_model
25
  ):
26
+ """
27
+ This function handles the conversation logic and streams the response.
28
 
29
+ Arguments:
30
+ - message: The new user message
31
+ - history: Chat history in the form of a list of (user_message, assistant_message) pairs
32
+ - system_message: The system prompt specifying how the assistant should behave
33
+ - max_tokens, temperature, top_p, frequency_penalty, seed, custom_model: Various parameters for text generation
34
+ """
35
  print(f"Received message: {message}")
36
  print(f"History: {history}")
37
  print(f"System message: {system_message}")
 
43
  if seed == -1:
44
  seed = None
45
 
46
+ # Create the base system-level message
47
  messages = [{"role": "system", "content": system_message}]
48
  print("Initial messages array constructed.")
49
 
 
70
  response = ""
71
  print("Sending request to OpenAI API.")
72
 
73
+ # Stream tokens from the HF inference endpoint
74
  for message_chunk in client.chat.completions.create(
75
  model=model_to_use,
76
  max_tokens=max_tokens,
 
88
 
89
  print("Completed response generation.")
90
 
 
91
 
92
+ # -------------------------
93
+ # Gradio UI definitions
94
+ # -------------------------
95
+
96
+ # Chatbot interface
97
+ chatbot = gr.Chatbot(
98
+ height=600,
99
+ show_copy_button=True,
100
+ placeholder="Select a model and begin chatting",
101
+ likeable=True,
102
+ layout="panel"
103
+ )
104
  print("Chatbot interface created.")
105
 
106
+ # System prompt textbox
107
+ system_message_box = gr.Textbox(
108
+ value="",
109
+ placeholder="You are a helpful assistant.",
110
+ label="System Prompt"
111
+ )
112
 
113
+ # Sliders
114
  max_tokens_slider = gr.Slider(
115
  minimum=1,
116
  maximum=4096,
 
147
  label="Seed (-1 for random)"
148
  )
149
 
150
+ # This textbox is what the respond() function sees as "custom_model"
151
+ # We will visually place it inside the Model Selection accordion (below),
152
+ # but we define it here so it can be passed to the ChatInterface.
153
  custom_model_box = gr.Textbox(
154
  value="",
155
  label="Custom Model",
 
157
  placeholder="meta-llama/Llama-3.3-70B-Instruct"
158
  )
159
 
160
+ # Create the ChatInterface, referencing the respond function and including all inputs
 
 
 
 
 
 
 
161
  demo = gr.ChatInterface(
162
  fn=respond,
163
  additional_inputs=[
 
167
  top_p_slider,
168
  frequency_penalty_slider,
169
  seed_slider,
170
+ custom_model_box, # We pass it here to the ChatInterface function
171
  ],
172
  fill_height=True,
173
  chatbot=chatbot,
 
175
  )
176
  print("ChatInterface object created.")
177
 
178
+
179
+ # --------------------------
180
+ # Additional Model Selection
181
+ # --------------------------
182
+
183
# Keeps the Custom Model textbox in sync with the 'Featured Models' radio.
def set_custom_model_from_radio(selected):
    """
    Mirror the featured-model radio selection into the Custom Model textbox.

    Arguments:
    - selected: the model id string the user picked from the radio.

    Returns the selection unchanged; Gradio routes it into the textbox output.
    """
    choice = selected
    print(f"Featured model selected: {choice}")
    return choice
191
+
192
+ # The set of models displayed in the radio
193
+ models_list = [
194
+ "meta-llama/Llama-3.3-70B-Instruct",
195
+ "meta-llama/Llama-3.2-3B-Instruct",
196
+ "meta-llama/Llama-3.2-1B-Instruct",
197
+ "meta-llama/Llama-3.1-8B-Instruct",
198
+ "NousResearch/Hermes-3-Llama-3.1-8B",
199
+ "google/gemma-2-27b-it",
200
+ "google/gemma-2-9b-it",
201
+ "google/gemma-2-2b-it",
202
+ "mistralai/Mistral-Nemo-Instruct-2407",
203
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
204
+ "mistralai/Mistral-7B-Instruct-v0.3",
205
+ "Qwen/Qwen2.5-72B-Instruct",
206
+ "Qwen/QwQ-32B-Preview",
207
+ "PowerInfer/SmallThinker-3B-Preview",
208
+ "HuggingFaceTB/SmolLM2-1.7B-Instruct",
209
+ "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
210
+ "microsoft/Phi-3.5-mini-instruct",
211
+ ]
212
+ print("Models list initialized.")
213
+
214
+ # This function handles searching for models by a user-provided filter
215
+ def filter_models(search_term):
216
+ print(f"Filtering models with search term: {search_term}")
217
+ filtered = [m for m in models_list if search_term.lower() in m.lower()]
218
+ print(f"Filtered models: {filtered}")
219
+ return gr.update(choices=filtered)
220
+
221
+
222
+ # --------------------------------
223
+ # Advanced UI arrangement with demo
224
+ # --------------------------------
225
  with demo:
226
+ # Create an Accordion for model selection
227
  with gr.Accordion("Model Selection", open=False):
228
+ # Place the Filter Models textbox and the Custom Model textbox side by side
229
+ with gr.Row():
230
+ model_search_box = gr.Textbox(
231
+ label="Filter Models",
232
+ placeholder="Search for a featured model...",
233
+ lines=1
234
+ )
235
+ # Render the already-defined 'custom_model_box' so it appears in this row
236
+ custom_model_box.render()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
237
 
238
+ # Create the Radio for featured models
239
  featured_model_radio = gr.Radio(
240
  label="Select a model below",
241
  choices=models_list,
 
244
  )
245
  print("Featured models radio button created.")
246
 
247
+ # Link the search box to the filtering function
 
 
 
 
 
248
  model_search_box.change(
249
  fn=filter_models,
250
  inputs=model_search_box,
 
252
  )
253
  print("Model search box change event linked.")
254
 
255
+ # Link the radio to the function that sets the custom model textbox
256
  featured_model_radio.change(
257
  fn=set_custom_model_from_radio,
258
  inputs=featured_model_radio,
 
262
 
263
  print("Gradio interface initialized.")
264
 
265
+
266
+ # -----------------------
267
+ # Launch the application
268
+ # -----------------------
269
  if __name__ == "__main__":
270
  print("Launching the demo application.")
271
  demo.launch()