alonsosilva committed on
Commit d74d427 · unverified · 1 parent: f902cca
Files changed (5)
  1. Dockerfile +18 -0
  2. README.md +0 -1
  3. app.py +144 -0
  4. requirements.in +12 -0
  5. requirements.txt +505 -0
Dockerfile ADDED
@@ -0,0 +1,18 @@
+ FROM python:3.12
+ COPY --from=ghcr.io/astral-sh/uv:0.4.20 /uv /bin/uv
+
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+ ENV PATH="/home/user/.local/bin:$PATH"
+ ENV UV_SYSTEM_PYTHON=1
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN uv pip install -r requirements.txt
+
+ COPY --chown=user . /app
+ # Switch to the "user" user
+ USER user
+
+ CMD ["solara", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
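
Note (not part of the commit): to smoke-test the image locally, something along the lines of docker build -t perec-space . followed by docker run -p 7860:7860 perec-space should serve the Solara app at http://localhost:7860, matching the port in the CMD above (the image tag is a hypothetical name).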
README.md CHANGED
@@ -9,4 +9,3 @@ license: apache-2.0
  short_description: Forcing a language model not to use a vowel
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,144 @@
+ import re
+ from threading import Thread
+ from typing import List
+ import torch
+ import solara
+ from unicodedata import normalize
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+ from transformers.generation import LogitsProcessor
+ from typing_extensions import TypedDict
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ model_id = "Qwen/Qwen3-0.6B"
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id, cache_dir="/big_storage/llms/hf_models/"
+ ).to(device)
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
+
+ def response_generator(user_input, logits_processor=[], enable_thinking=False):
+     prompt = tokenizer.apply_chat_template(
+         [{"role": "user", "content": user_input}],
+         tokenize=False,
+         add_generation_prompt=True,
+         enable_thinking=enable_thinking
+     )
+     model_inputs = tokenizer(prompt, return_tensors="pt").to(device)
+     generation_kwargs = dict(
+         model_inputs,
+         streamer=streamer,
+         logits_processor=logits_processor,
+         max_new_tokens=4 * 1024,
+         do_sample=True,
+         temperature=0.7,
+         top_p=1.0,
+         top_k=50,
+     )
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+     for chunk in streamer:
+         if tokenizer.eos_token in chunk or tokenizer.pad_token in chunk:
+             chunk = chunk.split(tokenizer.eos_token)[0]
+             chunk = chunk.split(tokenizer.pad_token)[0]
+         yield chunk
+     thread.join()
+
+ list_of_vowels = ["a", "e", "i", "o", "u"]
+ tokens_per_vowel = dict()
+ for vowel in list_of_vowels:
+     tokens_containing_a_given_vowel = []
+     for token_id in range(tokenizer.vocab_size):
+         if (
+             vowel in tokenizer.decode(token_id)
+             or vowel.upper() in tokenizer.decode(token_id)
+             or normalize('NFC', f"{vowel}\u0300") in tokenizer.decode(token_id)
+             or normalize('NFC', f"{vowel}\u0301") in tokenizer.decode(token_id)
+             or normalize('NFC', f"{vowel}\u0302") in tokenizer.decode(token_id)
+             or normalize('NFC', f"{vowel}\u0303") in tokenizer.decode(token_id)
+             or normalize('NFC', f"{vowel}\u0308") in tokenizer.decode(token_id)
+         ):
+             tokens_containing_a_given_vowel.append(token_id)
+     tokens_per_vowel[vowel] = tokens_containing_a_given_vowel
+
+ class GeorgePerecLogitsProcessor(LogitsProcessor):
+     def __init__(self, forbidden_tokens: List[int]):
+         self.forbidden_tokens = forbidden_tokens
+
+     def __call__(
+         self, input_ids: torch.LongTensor, scores: torch.FloatTensor
+     ) -> torch.FloatTensor:
+         scores_processed = scores.clone()
+         vocab_tensor = torch.arange(scores.shape[-1], device=scores.device)
+         forbidden_tokens = torch.tensor(self.forbidden_tokens, device=scores.device)
+         forbidden_tokens_mask = torch.isin(vocab_tensor, forbidden_tokens)
+         scores_processed = torch.where(forbidden_tokens_mask, -torch.inf, scores)
+
+         return scores_processed
+
+
+ def add_chunk_to_ai_message(chunk: str):
+     messages.value = [
+         *messages.value[:-1],
+         {
+             "role": "assistant",
+             "content": messages.value[-1]["content"] + chunk,
+         },
+     ]
+
+ class MessageDict(TypedDict):
+     role: str
+     content: str
+
+ messages: solara.Reactive[List[MessageDict]] = solara.reactive([])
+ enable_thinking_options = [True, False]
+ enable_thinking = solara.reactive(False)
+ vowels = ["a", "e", "i", "o", "u", "None"]
+ vowel = solara.reactive("e")
+ @solara.component
+ def Page():
+     solara.lab.theme.themes.light.primary = "#0000ff"
+     solara.lab.theme.themes.light.secondary = "#0000ff"
+     solara.lab.theme.themes.dark.primary = "#0000ff"
+     solara.lab.theme.themes.dark.secondary = "#0000ff"
+     title = "Georges Perec"
+     with solara.Head():
+         solara.Title(f"{title}")
+     with solara.Column(align="center"):
+         with solara.Sidebar():
+             solara.Markdown("# G⎵org⎵s P⎵r⎵c")
+             solara.Markdown("## Forcing a language model not to use a vowel")
+             solara.Markdown("Select a forbidden vowel:")
+             solara.ToggleButtonsSingle(value=vowel, values=vowels)
+             solara.Markdown("Enable thinking:")
+             solara.ToggleButtonsSingle(value=enable_thinking, values=enable_thinking_options)
+         if vowel.value == "None":
+             logits_processor = []
+         else:
+             logits_processor = [
+                 GeorgePerecLogitsProcessor(
+                     forbidden_tokens=tokens_per_vowel[vowel.value],
+                 )
+             ]
+         user_message_count = len([m for m in messages.value if m["role"] == "user"])
+         def send(message):
+             messages.value = [*messages.value, {"role": "user", "content": message}]
+         def response(message):
+             messages.value = [*messages.value, {"role": "assistant", "content": ""}]
+             for chunk in response_generator(message, logits_processor=logits_processor, enable_thinking=enable_thinking.value):
+                 add_chunk_to_ai_message(chunk)
+         def result():
+             if messages.value != []:
+                 response(messages.value[-1]["content"])
+         result = solara.lab.use_task(result, dependencies=[user_message_count])
+         with solara.lab.ChatBox(style={"position": "fixed", "overflow-y": "scroll", "scrollbar-width": "none", "-ms-overflow-style": "none", "top": "0", "bottom": "10rem", "width": "60%"}):
+             for item in messages.value:
+                 with solara.lab.ChatMessage(
+                     user=item["role"] == "user",
+                     name="User" if item["role"] == "user" else "Assistant",
+                     avatar_background_color="#33cccc" if item["role"] == "assistant" else "#ff991f",
+                     border_radius="20px",
+                     style="background-color:darkgrey!important;" if solara.lab.theme.dark_effective else "background-color:lightgrey!important;"
+                 ):
+                     solara.Markdown(item["content"])
+         solara.lab.ChatInput(send_callback=send, style={"position": "fixed", "bottom": "3rem", "width": "60%"})
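
A minimal sketch, not part of the commit, of how the GeorgePerecLogitsProcessor above plugs into generate directly, assuming the module-level objects from app.py (model, tokenizer, device, tokens_per_vowel) are in scope; LogitsProcessorList is the standard transformers wrapper for a list of processors:

from transformers import LogitsProcessorList

# Ban every token whose decoded form contains "e" (accented forms included).
ban_e = GeorgePerecLogitsProcessor(forbidden_tokens=tokens_per_vowel["e"])
inputs = tokenizer("Talk about Paris.", return_tensors="pt").to(device)
output_ids = model.generate(
    **inputs,
    logits_processor=LogitsProcessorList([ban_e]),
    max_new_tokens=50,
    do_sample=True,
)
# Keep only the completion, dropping the prompt tokens.
completion = tokenizer.decode(
    output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
)
# Holds as long as the vocabulary scan above caught every "e"-bearing token.
assert "e" not in completion.lower()
print(completion)

Masking the forbidden ids to -inf before sampling guarantees they can never be drawn, which is stronger than prompting the model to avoid the vowel.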
requirements.in ADDED
@@ -0,0 +1,12 @@
+ jupyterlab
+ jupyterlab_execute_time
+ black
+ isort
+ ruff
+ jupyterlab_code_formatter
+ jupytext
+ ipywidgets
+ torch
+ transformers
+ openai
+ solara
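
requirements.txt below is the pin set uv derives from these twelve top-level entries; rerunning uv pip compile requirements.in (the command recorded in its header) regenerates it — writing the output back to requirements.txt, e.g. with -o, is an assumption beyond what the header records.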
requirements.txt ADDED
@@ -0,0 +1,505 @@
+ # This file was autogenerated by uv via the following command:
+ #    uv pip compile requirements.in
+ annotated-types==0.7.0
+     # via pydantic
+ anyio==4.9.0
+     # via
+     #   httpx
+     #   jupyter-server
+     #   openai
+     #   starlette
+     #   watchfiles
+ argon2-cffi==25.1.0
+     # via jupyter-server
+ argon2-cffi-bindings==21.2.0
+     # via argon2-cffi
+ arrow==1.3.0
+     # via isoduration
+ asttokens==3.0.0
+     # via stack-data
+ async-lru==2.0.5
+     # via jupyterlab
+ attrs==25.3.0
+     # via
+     #   jsonschema
+     #   referencing
+ babel==2.17.0
+     # via jupyterlab-server
+ beautifulsoup4==4.13.4
+     # via nbconvert
+ black==25.1.0
+     # via -r requirements.in
+ bleach==6.2.0
+     # via nbconvert
+ cachetools==6.1.0
+     # via solara-ui
+ certifi==2025.7.14
+     # via
+     #   httpcore
+     #   httpx
+     #   requests
+ cffi==1.17.1
+     # via argon2-cffi-bindings
+ charset-normalizer==3.4.2
+     # via requests
+ click==8.2.1
+     # via
+     #   black
+     #   rich-click
+     #   solara-server
+     #   uvicorn
+ comm==0.2.2
+     # via
+     #   ipykernel
+     #   ipywidgets
+ debugpy==1.8.15
+     # via ipykernel
+ decorator==5.2.1
+     # via ipython
+ defusedxml==0.7.1
+     # via nbconvert
+ distro==1.9.0
+     # via openai
+ executing==2.2.0
+     # via stack-data
+ fastjsonschema==2.21.1
+     # via nbformat
+ filelock==3.18.0
+     # via
+     #   huggingface-hub
+     #   solara-server
+     #   torch
+     #   transformers
+ fqdn==1.5.1
+     # via jsonschema
+ fsspec==2025.7.0
+     # via
+     #   huggingface-hub
+     #   torch
+ h11==0.16.0
+     # via
+     #   httpcore
+     #   uvicorn
+ hf-xet==1.1.5
+     # via huggingface-hub
+ httpcore==1.0.9
+     # via httpx
+ httpx==0.28.1
+     # via
+     #   jupyterlab
+     #   openai
+ huggingface-hub==0.33.4
+     # via
+     #   tokenizers
+     #   transformers
+ humanize==4.12.3
+     # via solara-ui
+ idna==3.10
+     # via
+     #   anyio
+     #   httpx
+     #   jsonschema
+     #   requests
+ ipykernel==6.29.5
+     # via
+     #   jupyterlab
+     #   solara-server
+ ipython==9.4.0
+     # via
+     #   ipykernel
+     #   ipywidgets
+ ipython-pygments-lexers==1.1.1
+     # via ipython
+ ipyvue==1.11.2
+     # via
+     #   ipyvuetify
+     #   solara-ui
+ ipyvuetify==1.11.3
+     # via solara-ui
+ ipywidgets==8.1.7
+     # via
+     #   -r requirements.in
+     #   ipyvue
+     #   reacton
+     #   solara-ui
+ isoduration==20.11.0
+     # via jsonschema
+ isort==6.0.1
+     # via -r requirements.in
+ jedi==0.19.2
+     # via ipython
+ jinja2==3.1.6
+     # via
+     #   jupyter-server
+     #   jupyterlab
+     #   jupyterlab-server
+     #   nbconvert
+     #   solara-server
+     #   torch
+ jiter==0.10.0
+     # via openai
+ json5==0.12.0
+     # via jupyterlab-server
+ jsonpointer==3.0.0
+     # via jsonschema
+ jsonschema==4.24.1
+     # via
+     #   jupyter-events
+     #   jupyterlab-server
+     #   nbformat
+ jsonschema-specifications==2025.4.1
+     # via jsonschema
+ jupyter-client==8.6.3
+     # via
+     #   ipykernel
+     #   jupyter-server
+     #   nbclient
+     #   solara-server
+ jupyter-core==5.8.1
+     # via
+     #   ipykernel
+     #   jupyter-client
+     #   jupyter-server
+     #   jupyterlab
+     #   nbclient
+     #   nbconvert
+     #   nbformat
+ jupyter-events==0.12.0
+     # via jupyter-server
+ jupyter-lsp==2.2.5
+     # via jupyterlab
+ jupyter-server==2.16.0
+     # via
+     #   jupyter-lsp
+     #   jupyterlab
+     #   jupyterlab-code-formatter
+     #   jupyterlab-server
+     #   notebook-shim
+ jupyter-server-terminals==0.5.3
+     # via jupyter-server
+ jupyterlab==4.4.4
+     # via
+     #   -r requirements.in
+     #   jupyterlab-execute-time
+ jupyterlab-code-formatter==3.0.2
+     # via -r requirements.in
+ jupyterlab-execute-time==3.2.0
+     # via -r requirements.in
+ jupyterlab-pygments==0.3.0
+     # via nbconvert
+ jupyterlab-server==2.27.3
+     # via jupyterlab
+ jupyterlab-widgets==3.0.15
+     # via ipywidgets
+ jupytext==1.17.2
+     # via -r requirements.in
+ markdown==3.8.2
+     # via
+     #   pymdown-extensions
+     #   solara-ui
+ markdown-it-py==3.0.0
+     # via
+     #   jupytext
+     #   mdit-py-plugins
+     #   rich
+ markupsafe==3.0.2
+     # via
+     #   jinja2
+     #   nbconvert
+ matplotlib-inline==0.1.7
+     # via
+     #   ipykernel
+     #   ipython
+ mdit-py-plugins==0.4.2
+     # via jupytext
+ mdurl==0.1.2
+     # via markdown-it-py
+ mistune==3.1.3
+     # via nbconvert
+ mpmath==1.3.0
+     # via sympy
+ mypy-extensions==1.1.0
+     # via black
+ nbclient==0.10.2
+     # via nbconvert
+ nbconvert==7.16.6
+     # via jupyter-server
+ nbformat==5.10.4
+     # via
+     #   jupyter-server
+     #   jupytext
+     #   nbclient
+     #   nbconvert
+     #   solara-server
+ nest-asyncio==1.6.0
+     # via ipykernel
+ networkx==3.5
+     # via torch
+ notebook-shim==0.2.4
+     # via jupyterlab
+ numpy==2.3.1
+     # via
+     #   solara-ui
+     #   transformers
+ nvidia-cublas-cu12==12.6.4.1
+     # via
+     #   nvidia-cudnn-cu12
+     #   nvidia-cusolver-cu12
+     #   torch
+ nvidia-cuda-cupti-cu12==12.6.80
+     # via torch
+ nvidia-cuda-nvrtc-cu12==12.6.77
+     # via torch
+ nvidia-cuda-runtime-cu12==12.6.77
+     # via torch
+ nvidia-cudnn-cu12==9.5.1.17
+     # via torch
+ nvidia-cufft-cu12==11.3.0.4
+     # via torch
+ nvidia-cufile-cu12==1.11.1.6
+     # via torch
+ nvidia-curand-cu12==10.3.7.77
+     # via torch
+ nvidia-cusolver-cu12==11.7.1.2
+     # via torch
+ nvidia-cusparse-cu12==12.5.4.2
+     # via
+     #   nvidia-cusolver-cu12
+     #   torch
+ nvidia-cusparselt-cu12==0.6.3
+     # via torch
+ nvidia-nccl-cu12==2.26.2
+     # via torch
+ nvidia-nvjitlink-cu12==12.6.85
+     # via
+     #   nvidia-cufft-cu12
+     #   nvidia-cusolver-cu12
+     #   nvidia-cusparse-cu12
+     #   torch
+ nvidia-nvtx-cu12==12.6.77
+     # via torch
+ openai==1.97.0
+     # via -r requirements.in
+ overrides==7.7.0
+     # via jupyter-server
+ packaging==25.0
+     # via
+     #   black
+     #   huggingface-hub
+     #   ipykernel
+     #   jupyter-events
+     #   jupyter-server
+     #   jupyterlab
+     #   jupyterlab-code-formatter
+     #   jupyterlab-server
+     #   jupytext
+     #   nbconvert
+     #   transformers
+ pandocfilters==1.5.1
+     # via nbconvert
+ parso==0.8.4
+     # via jedi
+ pathspec==0.12.1
+     # via black
+ pexpect==4.9.0
+     # via ipython
+ pillow==11.3.0
+     # via solara-ui
+ platformdirs==4.3.8
+     # via
+     #   black
+     #   jupyter-core
+ prometheus-client==0.22.1
+     # via jupyter-server
+ prompt-toolkit==3.0.51
+     # via ipython
+ psutil==7.0.0
+     # via ipykernel
+ ptyprocess==0.7.0
+     # via
+     #   pexpect
+     #   terminado
+ pure-eval==0.2.3
+     # via stack-data
+ pycparser==2.22
+     # via cffi
+ pydantic==2.11.7
+     # via openai
+ pydantic-core==2.33.2
+     # via pydantic
+ pygments==2.19.2
+     # via
+     #   ipython
+     #   ipython-pygments-lexers
+     #   nbconvert
+     #   rich
+     #   solara-ui
+ pymdown-extensions==10.16
+     # via solara-ui
+ python-dateutil==2.9.0.post0
+     # via
+     #   arrow
+     #   jupyter-client
+ python-json-logger==3.3.0
+     # via jupyter-events
+ pyyaml==6.0.2
+     # via
+     #   huggingface-hub
+     #   jupyter-events
+     #   jupytext
+     #   pymdown-extensions
+     #   transformers
+ pyzmq==27.0.0
+     # via
+     #   ipykernel
+     #   jupyter-client
+     #   jupyter-server
+ reacton==1.9.1
+     # via solara-ui
+ referencing==0.36.2
+     # via
+     #   jsonschema
+     #   jsonschema-specifications
+     #   jupyter-events
+ regex==2024.11.6
+     # via transformers
+ requests==2.32.4
+     # via
+     #   huggingface-hub
+     #   jupyterlab-server
+     #   solara-ui
+     #   transformers
+ rfc3339-validator==0.1.4
+     # via
+     #   jsonschema
+     #   jupyter-events
+ rfc3986-validator==0.1.1
+     # via
+     #   jsonschema
+     #   jupyter-events
+ rich==14.0.0
+     # via rich-click
+ rich-click==1.8.9
+     # via solara-server
+ rpds-py==0.26.0
+     # via
+     #   jsonschema
+     #   referencing
+ ruff==0.12.4
+     # via -r requirements.in
+ safetensors==0.5.3
+     # via transformers
+ send2trash==1.8.3
+     # via jupyter-server
+ setuptools==80.9.0
+     # via
+     #   jupyterlab
+     #   torch
+     #   triton
+ six==1.17.0
+     # via
+     #   python-dateutil
+     #   rfc3339-validator
+ sniffio==1.3.1
+     # via
+     #   anyio
+     #   openai
+ solara==1.50.1
+     # via -r requirements.in
+ solara-server==1.50.1
+     # via solara
+ solara-ui==1.50.1
+     # via
+     #   solara
+     #   solara-server
+ soupsieve==2.7
+     # via beautifulsoup4
+ stack-data==0.6.3
+     # via ipython
+ starlette==0.47.1
+     # via solara-server
+ sympy==1.14.0
+     # via torch
+ terminado==0.18.1
+     # via
+     #   jupyter-server
+     #   jupyter-server-terminals
+ tinycss2==1.4.0
+     # via bleach
+ tokenizers==0.21.2
+     # via transformers
+ torch==2.7.1
+     # via -r requirements.in
+ tornado==6.5.1
+     # via
+     #   ipykernel
+     #   jupyter-client
+     #   jupyter-server
+     #   jupyterlab
+     #   terminado
+ tqdm==4.67.1
+     # via
+     #   huggingface-hub
+     #   openai
+     #   transformers
+ traitlets==5.14.3
+     # via
+     #   comm
+     #   ipykernel
+     #   ipython
+     #   ipywidgets
+     #   jupyter-client
+     #   jupyter-core
+     #   jupyter-events
+     #   jupyter-server
+     #   jupyterlab
+     #   matplotlib-inline
+     #   nbclient
+     #   nbconvert
+     #   nbformat
+ transformers==4.53.2
+     # via -r requirements.in
+ triton==3.3.1
+     # via torch
+ types-python-dateutil==2.9.0.20250708
+     # via arrow
+ typing-extensions==4.14.1
+     # via
+     #   anyio
+     #   beautifulsoup4
+     #   huggingface-hub
+     #   openai
+     #   pydantic
+     #   pydantic-core
+     #   reacton
+     #   referencing
+     #   rich-click
+     #   starlette
+     #   torch
+     #   typing-inspection
+ typing-inspection==0.4.1
+     # via pydantic
+ uri-template==1.3.0
+     # via jsonschema
+ urllib3==2.5.0
+     # via requests
+ uvicorn==0.35.0
+     # via solara-server
+ watchdog==6.0.0
+     # via solara-server
+ watchfiles==1.1.0
+     # via solara-server
+ wcwidth==0.2.13
+     # via prompt-toolkit
+ webcolors==24.11.1
+     # via jsonschema
+ webencodings==0.5.1
+     # via
+     #   bleach
+     #   tinycss2
+ websocket-client==1.8.0
+     # via jupyter-server
+ websockets==15.0.1
+     # via solara-server
+ widgetsnbextension==4.0.14
+     # via ipywidgets