burtenshaw committed
Commit f49bcc1 · 1 Parent(s): 5ba66b9

update app with formatting and images

Files changed (4):
  1. app.py +131 -47
  2. push_data.ipynb +215 -0
  3. pyproject.toml +1 -0
  4. uv.lock +48 -0
app.py CHANGED
@@ -1,10 +1,11 @@
 import os
 from datetime import datetime
 import random
-
+from typing import List
 import gradio as gr
 from datasets import load_dataset, Dataset, DatasetDict
 from huggingface_hub import whoami, InferenceClient
+import black  # Add black import

 # Initialize the inference client
 client = InferenceClient(
@@ -12,37 +13,64 @@ client = InferenceClient(
 )

 # Load questions from Hugging Face dataset
-EXAM_MAX_QUESTIONS = os.getenv("EXAM_MAX_QUESTIONS") or 5  # We have 5 questions total
-EXAM_PASSING_SCORE = os.getenv("EXAM_PASSING_SCORE") or 0.7
+EXAM_MAX_QUESTIONS = int(
+    os.getenv("EXAM_MAX_QUESTIONS", 5)
+)  # Limit quiz to max questions
+EXAM_PASSING_SCORE = float(os.getenv("EXAM_PASSING_SCORE", 0.8))
 EXAM_DATASET_ID = "burtenshaw/dummy-code-quiz"

 # prep the dataset for the quiz
-ds = load_dataset(EXAM_DATASET_ID, split="train")
-quiz_data = ds.to_list()
+ds = load_dataset(EXAM_DATASET_ID, split="train", download_mode="force_redownload")
+quiz_data = list(ds)  # Convert dataset to list instead of using to_list()
 random.shuffle(quiz_data)
+if EXAM_MAX_QUESTIONS:
+    quiz_data = quiz_data[:EXAM_MAX_QUESTIONS]
+
+
+def format_python_code(code: str) -> str:
+    """Format Python code using black."""
+    try:
+        return black.format_str(code, mode=black.Mode())
+    except Exception as e:
+        gr.Warning(f"Code formatting failed: {str(e)}")
+        return code


-def check_code(user_code, solution, challenge):
+def check_code(
+    user_code: str, solution: str, challenge: str, assessment_criteria: List[str]
+):
     """
     Use LLM to evaluate if the user's code solution is correct.
     Returns True if the solution is correct, False otherwise.
     """
+    # Format both user code and solution
+    formatted_user_code = format_python_code(user_code)
+    formatted_solution = format_python_code(solution)
+
+    assessment_criteria_str = "\n".join(
+        [f"{i + 1}. {c}" for i, c in enumerate(assessment_criteria)]
+    )
+
     prompt = f"""You are an expert Python programming instructor evaluating a student's code solution.

 Challenge:
 {challenge}

 Reference Solution:
-{solution}
+{formatted_solution}

 Student's Solution:
-{user_code}
+{formatted_user_code}
+
+Assessment Criteria:
+{assessment_criteria_str}

 Evaluate if the student's solution is functionally equivalent to the reference solution.
 Consider:
 1. Does it solve the problem correctly?
 2. Does it handle edge cases appropriately?
 3. Does it follow the requirements of the challenge?
+4. Does it meet the assessment criteria?

 Respond with ONLY "CORRECT" or "INCORRECT" followed by a brief explanation.
 """
@@ -71,18 +99,40 @@ def check_code(user_code, solution, challenge):
     except Exception as e:
         gr.Warning(f"Error checking code: {str(e)}")
         # Fall back to simple string comparison if LLM fails
-        is_correct = user_code.strip() == solution.strip()
+        is_correct = formatted_user_code.strip() == formatted_solution.strip()
         status = "✅ Correct!" if is_correct else "❌ Incorrect!"
         gr.Info(f"{status} (Fallback comparison)")
         return is_correct


 def on_user_logged_in(token: gr.OAuthToken | None):
-    """Handle user login state"""
+    """
+    Handle user login state.
+    On a valid token, hide the login button and reveal the Start button while keeping Next and Submit hidden.
+    Also, clear the question text, code input, status, and image.
+    """
     if token is not None:
-        return gr.update(visible=False), gr.update(visible=True)
+        return (
+            gr.update(visible=False),  # login_btn hidden
+            gr.update(visible=True),  # start_btn shown
+            gr.update(visible=False),  # next_btn hidden
+            gr.update(visible=False),  # submit_btn hidden
+            "",  # Clear question_text
+            gr.update(value="", visible=False),  # Clear code_input
+            "",  # Clear status_text
+            gr.update(value="", visible=False),  # Clear question_image
+        )
     else:
-        return gr.update(visible=True), gr.update(visible=False)
+        return (
+            gr.update(visible=True),  # login_btn visible
+            gr.update(visible=False),  # start_btn hidden
+            gr.update(visible=False),  # next_btn hidden
+            gr.update(visible=False),  # submit_btn hidden
+            "",
+            gr.update(value="", visible=False),
+            "",
+            gr.update(value="", visible=False),
+        )


 def push_results_to_hub(
@@ -168,14 +218,20 @@ def handle_quiz(question_idx, user_answers, submitted_code, is_start):
         question_idx < len(quiz_data) and submitted_code.strip()
     ):  # Only check if there's code
         current_q = quiz_data[question_idx]
+        # Format the submitted code before checking
+        formatted_code = format_python_code(submitted_code)
         is_correct = check_code(
-            submitted_code, current_q["solution"], current_q["challenge"]
+            formatted_code,
+            current_q["solution"],
+            current_q["challenge"],
+            current_q["assessment_criteria"],
         )
         user_answers.append(
             {
                 "challenge": current_q["challenge"],
-                "submitted_code": submitted_code,
+                "submitted_code": formatted_code,  # Store formatted code
                 "correct_solution": current_q["solution"],
+                "assessment_criteria": current_q["assessment_criteria"],
                 "is_correct": is_correct,
             }
         )
@@ -195,31 +251,36 @@ def handle_quiz(question_idx, user_answers, submitted_code, is_start):
             results_text += (
                 f"Question {idx + 1}: {'✅' if answer['is_correct'] else '❌'}\n"
             )
-            results_text += (
-                f"Your code:\n```python\n{answer['submitted_code']}\n```\n\n"
-            )

         return (
-            "",  # question_text becomes blank
-            gr.update(value="", visible=False),  # clear and hide code input
-            f"{'✅ Passed!' if grade >= float(EXAM_PASSING_SCORE) else '❌ Did not pass'}",
-            question_idx,
-            user_answers,
-            start_btn_update,
-            gr.update(value=results_text, visible=True),  # show final_markdown
+            "",  # question_text cleared
+            gr.update(value="", visible=False),  # hide code_input
+            f"{'✅ Passed!' if grade >= EXAM_PASSING_SCORE else '❌ Did not pass'}",  # status_text
+            question_idx,  # updated question index
+            user_answers,  # accumulated answers
+            gr.update(visible=False),  # start_btn hidden for quiz-in-progress
+            gr.update(visible=False),  # next_btn hidden on completion
+            gr.update(visible=True),  # submit_btn shown
+            gr.update(value=results_text, visible=True),  # final_markdown with results
+            gr.update(visible=False),  # question_image hidden on completion
         )
     else:
         # Show the next question
         q = quiz_data[question_idx]
         challenge_text = f"## Question {question_idx + 1} \n### {q['challenge']}"
         return (
-            challenge_text,
-            gr.update(value=q["placeholder"], visible=True),
-            "Submit your code solution and click 'Next' to continue.",
-            question_idx,
-            user_answers,
-            start_btn_update,
-            gr.update(visible=False),  # Hide final_markdown
+            challenge_text,  # question_text
+            gr.update(value=q["placeholder"], visible=True),  # code_input
+            "Submit your code solution and click 'Next' to continue.",  # status_text
+            question_idx,  # updated question_idx
+            user_answers,  # user_answers
+            gr.update(visible=False),  # start_btn hidden
+            gr.update(visible=True),  # next_btn visible
+            gr.update(visible=False),  # submit_btn hidden
+            gr.update(visible=False),  # final_markdown hidden
+            gr.update(
+                value=q["image"], visible=True if q["image"] else False
+            ),  # question_image with current question image
         )


@@ -238,35 +299,55 @@ with gr.Blocks() as demo:
     )

     with gr.Row(variant="panel"):
-        question_text = gr.Markdown("")
-        code_input = gr.Code(language="python", label="Your Solution", visible=False)
+        with gr.Column():
+            question_text = gr.Markdown("")
+            question_image = gr.Image(
+                label="Question Image", visible=False, type="pil"
+            )  # Add image component
+        with gr.Column():
+            code_input = gr.Code(language="python", label="Your Solution", visible=False)

     with gr.Row(variant="compact"):
         status_text = gr.Markdown("")

     with gr.Row(variant="compact"):
-        final_markdown = gr.Markdown("", visible=False)
-
-        next_btn = gr.Button("Next ⏭️")
-        submit_btn = gr.Button("Submit ✅")
+        login_btn = gr.LoginButton()
+        start_btn = gr.Button("Start")
+        next_btn = gr.Button("Next ⏭️", visible=False)
+        submit_btn = gr.Button("Submit ✅", visible=False)

     with gr.Row(variant="compact"):
-        login_btn = gr.LoginButton()
-        start_btn = gr.Button("Start", visible=False)
+        final_markdown = gr.Markdown("", visible=False)

-    login_btn.click(fn=on_user_logged_in, inputs=None, outputs=[login_btn, start_btn])
+    login_btn.click(
+        fn=on_user_logged_in,
+        inputs=None,
+        outputs=[
+            login_btn,
+            start_btn,
+            next_btn,
+            submit_btn,
+            question_text,
+            code_input,
+            status_text,
+            question_image,
+        ],
+    )

     start_btn.click(
         fn=handle_quiz,
         inputs=[question_idx, user_answers, code_input, gr.State(True)],
         outputs=[
-            question_text,
-            code_input,
-            status_text,
-            question_idx,
-            user_answers,
-            start_btn,
-            final_markdown,
+            question_text,  # Markdown with question text
+            code_input,  # Code input field
+            status_text,  # Status text (instructions/status messages)
+            question_idx,  # Updated question index (state)
+            user_answers,  # Updated user answers (state)
+            start_btn,  # Update for start button (will be hidden)
+            next_btn,  # Update for next button (shown for in-progress quiz)
+            submit_btn,  # Update for submit button (hidden until end)
+            final_markdown,  # Final results markdown (hidden until quiz ends)
+            question_image,  # Image update for the quiz question
         ],
     )

@@ -280,7 +361,10 @@ with gr.Blocks() as demo:
             question_idx,
             user_answers,
             start_btn,
+            next_btn,
+            submit_btn,
             final_markdown,
+            question_image,
         ],
     )
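The core of the `check_code` change above is that both the student submission and the reference solution are run through Black before the prompt is built and before the fallback string comparison. A minimal standalone sketch of that normalisation step, using the same `black.format_str` / `black.Mode` API as the diff; the two sample snippets are invented for illustration:

```python
import black

# Two solutions that differ only in whitespace and line breaks (illustrative).
user_code = "def add(a,b):return a+b"
solution = "def add(a, b):\n    return a + b\n"

formatted_user = black.format_str(user_code, mode=black.Mode())
formatted_solution = black.format_str(solution, mode=black.Mode())

# After normalisation, the fallback comparison used in check_code()
# (formatted_user_code.strip() == formatted_solution.strip()) matches,
# even though the raw strings would not.
assert formatted_user.strip() == formatted_solution.strip()
```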
 
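Several hunks above grow a handler's return tuple and the matching `outputs=[...]` list in lockstep (for example, `on_user_logged_in` now returns eight updates for eight components). Gradio maps a returned tuple onto `outputs` purely by position, which is why both sides must change together. A small self-contained sketch of that pattern, independent of this app; the `toggle` handler and component names are illustrative only:

```python
import gradio as gr


def toggle(logged_in: bool):
    # The returned tuple is matched positionally against outputs=[...]:
    # 1st value -> login_btn, 2nd -> start_btn, 3rd -> status.
    return (
        gr.update(visible=not logged_in),
        gr.update(visible=logged_in),
        "Ready to start." if logged_in else "Please log in first.",
    )


with gr.Blocks() as demo:
    login_btn = gr.Button("Login")
    start_btn = gr.Button("Start", visible=False)
    status = gr.Markdown("")
    logged_in = gr.State(True)

    login_btn.click(fn=toggle, inputs=logged_in, outputs=[login_btn, start_btn, status])

# demo.launch() would serve the interface; omitted here.
```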
push_data.ipynb ADDED
@@ -0,0 +1,215 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 41,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Generating train split: 40 examples [00:00, 9668.75 examples/s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "DatasetDict({\n",
+      " train: Dataset({\n",
+      " features: ['challenge', 'solution', 'placeholder', 'context', 'assessment_criteria', 'image'],\n",
+      " num_rows: 40\n",
+      " })\n",
+      "})\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "from datasets import load_dataset\n",
+    "\n",
+    "dataset = load_dataset(\"json\", data_files=\"example.json\")\n",
+    "\n",
+    "print(dataset)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 42,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Map: 100%|██████████| 40/40 [00:00<00:00, 6077.16 examples/s]\n"
+     ]
+    }
+   ],
+   "source": [
+    "from datasets import Image\n",
+    "from PIL import Image as PILImage\n",
+    "\n",
+    "new_features = dataset[\"train\"].features\n",
+    "new_features[\"image\"] = Image()\n",
+    "\n",
+    "dataset.features = new_features\n",
+    "\n",
+    "dataset = dataset.map(lambda x: {\"image\": PILImage.open(x[\"image\"]) if x[\"image\"] else None})\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 43,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'challenge': Value(dtype='string', id=None),\n",
+       " 'solution': Value(dtype='string', id=None),\n",
+       " 'placeholder': Value(dtype='string', id=None),\n",
+       " 'context': Value(dtype='string', id=None),\n",
+       " 'assessment_criteria': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),\n",
+       " 'image': Image(mode=None, decode=True, id=None)}"
+      ]
+     },
+     "execution_count": 43,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "dataset = dataset.cast_column(\"image\", Image())\n",
+    "\n",
+    "dataset[\"train\"].features"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 44,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Map: 100%|██████████| 40/40 [00:00<00:00, 13932.25 examples/s]it/s]\n",
+      "Creating parquet from Arrow format: 100%|██████████| 1/1 [00:00<00:00, 1119.68ba/s]\n",
+      "Uploading the dataset shards: 100%|██████████| 1/1 [00:01<00:00, 1.36s/it]\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "CommitInfo(commit_url='https://huggingface.co/datasets/burtenshaw/dummy-code-quiz/commit/9fdc884bb54602cce29639e76a7cabedf79811f8', commit_message='Upload dataset', commit_description='', oid='9fdc884bb54602cce29639e76a7cabedf79811f8', pr_url=None, repo_url=RepoUrl('https://huggingface.co/datasets/burtenshaw/dummy-code-quiz', endpoint='https://huggingface.co', repo_type='dataset', repo_id='burtenshaw/dummy-code-quiz'), pr_revision=None, pr_num=None)"
+      ]
+     },
+     "execution_count": 44,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "dataset.push_to_hub(\"burtenshaw/dummy-code-quiz\", private=False)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'/Users/ben/code/code_assignment_app/images/1.png'"
+      ]
+     },
+     "execution_count": 15,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "dataset[\"train\"][0][\"image\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Generating burtenshaw split: 100%|██████████| 1/1 [00:00<00:00, 262.13 examples/s]\n"
+     ]
+    }
+   ],
+   "source": [
+    "response_ds = load_dataset(\"agents-course/dummy-code-quiz_responses\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "DatasetDict({\n",
+       " burtenshaw: Dataset({\n",
+       " features: ['username', 'datetime', 'grade', 'challenge', 'submitted_code', 'correct_solution', 'is_correct'],\n",
+       " num_rows: 1\n",
+       " })\n",
+       "})"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "response_ds"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": ".venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
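The notebook builds the quiz dataset from a local `example.json` (not part of this commit) and casts the `image` column from a file path to a `datasets` Image feature before pushing. A hedged sketch of the record shape implied by the printed features (`challenge`, `solution`, `placeholder`, `context`, `assessment_criteria`, `image`); all field values below are invented examples:

```python
import json

# Hypothetical record; only the field names are taken from the notebook output above.
record = {
    "challenge": "Write a function add(a, b) that returns the sum of its arguments.",
    "solution": "def add(a, b):\n    return a + b\n",
    "placeholder": "def add(a, b):\n    # your code here\n    pass\n",
    "context": "Basic Python functions.",
    "assessment_criteria": [
        "Defines a function named add",
        "Returns the sum of both arguments",
    ],
    "image": "images/1.png",  # local path, cast to an Image feature in the notebook
}

# load_dataset("json", data_files="example.json") accepts a JSON array of such records.
with open("example.json", "w") as f:
    json.dump([record], f, indent=2)
```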
pyproject.toml CHANGED
@@ -5,6 +5,7 @@ description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.11"
 dependencies = [
+    "black>=25.1.0",
     "datasets>=3.2.0",
     "gradio[oauth]>=5.13.2",
     "huggingface-hub>=0.28.0",
uv.lock CHANGED
@@ -200,6 +200,34 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/e8/6a/e83a6c04f8c6014c33d97c135782a55370cf60513f8d9f99f1279c7f9c13/Authlib-1.4.1-py2.py3-none-any.whl", hash = "sha256:edc29c3f6a3e72cd9e9f45fff67fc663a2c364022eb0371c003f22d5405915c1", size = 225610 },
 ]

+[[package]]
+name = "black"
+version = "25.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "click" },
+    { name = "mypy-extensions" },
+    { name = "packaging" },
+    { name = "pathspec" },
+    { name = "platformdirs" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/94/49/26a7b0f3f35da4b5a65f081943b7bcd22d7002f5f0fb8098ec1ff21cb6ef/black-25.1.0.tar.gz", hash = "sha256:33496d5cd1222ad73391352b4ae8da15253c5de89b93a80b3e2c8d9a19ec2666", size = 649449 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/7e/4f/87f596aca05c3ce5b94b8663dbfe242a12843caaa82dd3f85f1ffdc3f177/black-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a39337598244de4bae26475f77dda852ea00a93bd4c728e09eacd827ec929df0", size = 1614372 },
+    { url = "https://files.pythonhosted.org/packages/e7/d0/2c34c36190b741c59c901e56ab7f6e54dad8df05a6272a9747ecef7c6036/black-25.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96c1c7cd856bba8e20094e36e0f948718dc688dba4a9d78c3adde52b9e6c2299", size = 1442865 },
+    { url = "https://files.pythonhosted.org/packages/21/d4/7518c72262468430ead45cf22bd86c883a6448b9eb43672765d69a8f1248/black-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce2e264d59c91e52d8000d507eb20a9aca4a778731a08cfff7e5ac4a4bb7096", size = 1749699 },
+    { url = "https://files.pythonhosted.org/packages/58/db/4f5beb989b547f79096e035c4981ceb36ac2b552d0ac5f2620e941501c99/black-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:172b1dbff09f86ce6f4eb8edf9dede08b1fce58ba194c87d7a4f1a5aa2f5b3c2", size = 1428028 },
+    { url = "https://files.pythonhosted.org/packages/83/71/3fe4741df7adf015ad8dfa082dd36c94ca86bb21f25608eb247b4afb15b2/black-25.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b60580e829091e6f9238c848ea6750efed72140b91b048770b64e74fe04908b", size = 1650988 },
+    { url = "https://files.pythonhosted.org/packages/13/f3/89aac8a83d73937ccd39bbe8fc6ac8860c11cfa0af5b1c96d081facac844/black-25.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e2978f6df243b155ef5fa7e558a43037c3079093ed5d10fd84c43900f2d8ecc", size = 1453985 },
+    { url = "https://files.pythonhosted.org/packages/6f/22/b99efca33f1f3a1d2552c714b1e1b5ae92efac6c43e790ad539a163d1754/black-25.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b48735872ec535027d979e8dcb20bf4f70b5ac75a8ea99f127c106a7d7aba9f", size = 1783816 },
+    { url = "https://files.pythonhosted.org/packages/18/7e/a27c3ad3822b6f2e0e00d63d58ff6299a99a5b3aee69fa77cd4b0076b261/black-25.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:ea0213189960bda9cf99be5b8c8ce66bb054af5e9e861249cd23471bd7b0b3ba", size = 1440860 },
+    { url = "https://files.pythonhosted.org/packages/98/87/0edf98916640efa5d0696e1abb0a8357b52e69e82322628f25bf14d263d1/black-25.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f0b18a02996a836cc9c9c78e5babec10930862827b1b724ddfe98ccf2f2fe4f", size = 1650673 },
+    { url = "https://files.pythonhosted.org/packages/52/e5/f7bf17207cf87fa6e9b676576749c6b6ed0d70f179a3d812c997870291c3/black-25.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afebb7098bfbc70037a053b91ae8437c3857482d3a690fefc03e9ff7aa9a5fd3", size = 1453190 },
+    { url = "https://files.pythonhosted.org/packages/e3/ee/adda3d46d4a9120772fae6de454c8495603c37c4c3b9c60f25b1ab6401fe/black-25.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:030b9759066a4ee5e5aca28c3c77f9c64789cdd4de8ac1df642c40b708be6171", size = 1782926 },
+    { url = "https://files.pythonhosted.org/packages/cc/64/94eb5f45dcb997d2082f097a3944cfc7fe87e071907f677e80788a2d7b7a/black-25.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a22f402b410566e2d1c950708c77ebf5ebd5d0d88a6a2e87c86d9fb48afa0d18", size = 1442613 },
+    { url = "https://files.pythonhosted.org/packages/09/71/54e999902aed72baf26bca0d50781b01838251a462612966e9fc4891eadd/black-25.1.0-py3-none-any.whl", hash = "sha256:95e8176dae143ba9097f351d174fdaf0ccd29efb414b362ae3fd72bf0f710717", size = 207646 },
+]
+
 [[package]]
 name = "certifi"
 version = "2024.12.14"
@@ -319,6 +347,7 @@ name = "code-assignment-app"
 version = "0.1.0"
 source = { virtual = "." }
 dependencies = [
+    { name = "black" },
     { name = "datasets" },
     { name = "gradio", extra = ["oauth"] },
     { name = "huggingface-hub" },
@@ -327,6 +356,7 @@ dependencies = [

 [package.metadata]
 requires-dist = [
+    { name = "black", specifier = ">=25.1.0" },
     { name = "datasets", specifier = ">=3.2.0" },
     { name = "gradio", extras = ["oauth"], specifier = ">=5.13.2" },
     { name = "huggingface-hub", specifier = ">=0.28.0" },
@@ -922,6 +952,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 },
 ]

+[[package]]
+name = "mypy-extensions"
+version = "1.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 },
+]
+
 [[package]]
 name = "nest-asyncio"
 version = "1.6.0"
@@ -1085,6 +1124,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 },
 ]

+[[package]]
+name = "pathspec"
+version = "0.12.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191 },
+]
+
 [[package]]
 name = "pexpect"
 version = "4.9.0"