Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,340 +1,157 @@
|
|
1 |
-
# app.py
|
2 |
-
# Gradio web app for Hugging Face Spaces: Bloom's Taxonomy Classifier, Generator, and Rewriter
|
3 |
-
# No external model downloads; purely rule-based and reproducible.
|
4 |
-
|
5 |
-
import re
|
6 |
-
import random
|
7 |
-
from typing import Dict, List, Tuple
|
8 |
-
|
9 |
import gradio as gr
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
"
|
27 |
-
"
|
28 |
-
"
|
29 |
-
|
30 |
-
|
31 |
-
"
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
"
|
36 |
-
"
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
"
|
42 |
-
|
43 |
-
|
44 |
-
"
|
45 |
-
"
|
46 |
-
|
47 |
-
"
|
48 |
-
"
|
49 |
-
"
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
}
|
52 |
|
53 |
-
#
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
]
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
"
|
81 |
-
|
82 |
-
]
|
83 |
-
"
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
"Create": [
|
91 |
-
"Design a novel solution that uses {topic}; outline the key steps.",
|
92 |
-
"Develop a project plan that incorporates {topic} with milestones and deliverables.",
|
93 |
-
"Propose an original framework that extends {topic}.",
|
94 |
-
"Formulate a research question involving {topic} and sketch a method to investigate it.",
|
95 |
-
"Compose a scenario or case study that requires {topic} to resolve.",
|
96 |
-
],
|
97 |
-
}
|
98 |
-
|
99 |
-
# Light penalties/bonuses to help disambiguate borderline levels
# Each Bloom level maps to structural cue words; score_levels adds a small
# +0.25 bonus per cue found, on top of the main VERBS keyword weights.
STRUCTURE_HINTS = {
    "Remember": ["define", "list", "identify", "name", "state"],
    "Understand": ["explain", "describe", "summarize", "illustrate", "classify"],
    "Apply": ["solve", "compute", "calculate", "apply", "use", "demonstrate"],
    "Analyze": ["compare", "contrast", "analyze", "why", "cause", "effect", "break down"],
    "Evaluate": ["evaluate", "assess", "justify", "which is better", "argue", "critique"],
    "Create": ["design", "develop", "propose", "create", "formulate", "invent", "plan"],
}
|
108 |
-
|
109 |
-
def normalize(text: str) -> str:
    """Return *text* lowercased, trimmed, with runs of whitespace collapsed to one space."""
    collapsed = " ".join(text.split())
    return collapsed.lower()
|
111 |
-
|
112 |
-
def score_levels(question: str) -> Dict[str, float]:
    """Keyword scoring with mild normalization and overlap handling.

    Returns one float score per Bloom level; higher means stronger signal.
    Relies on module-level BLOOMS_LEVELS, VERBS, and STRUCTURE_HINTS
    (defined earlier in the file; not fully visible in this view — verify).
    """
    q = normalize(question)
    scores = {lvl: 0.0 for lvl in BLOOMS_LEVELS}
    # Count verb hits (phrase -> weight)
    for lvl, words in VERBS.items():
        for w in words:
            # phrase-aware search; word boundary for single words
            if " " in w:
                # Multi-word cue: plain substring test, higher weight.
                if w in q:
                    scores[lvl] += 1.5
            else:
                # word-boundary with stemming-ish match for simple endings
                # (allows base form plus -e/-ed/-ing/-s suffixes).
                if re.search(rf"\b{re.escape(w)}(e|ed|ing|s)?\b", q):
                    scores[lvl] += 1.0

    # Structural hints get a small bonus
    for lvl, hints in STRUCTURE_HINTS.items():
        for h in hints:
            if h in q:
                scores[lvl] += 0.25

    # Heuristic nudges
    # Longer, multi-part questions tend to be higher-order
    tokens = len(q.split())
    if tokens >= 25:
        scores["Analyze"] += 0.3
        scores["Evaluate"] += 0.3
        scores["Create"] += 0.3

    # Presence of "design", "propose", etc. strongly suggests Create
    if any(k in q for k in ["design", "develop", "propose", "invent", "formulate", "plan"]):
        scores["Create"] += 1.2

    # Presence of justification language boosts Evaluate
    if any(k in q for k in ["justify", "assess", "evaluate", "argue", "critique", "recommend"]):
        scores["Evaluate"] += 1.2

    # "compare/contrast" and "why" boost Analyze
    if any(k in q for k in ["compare", "contrast", "analyze", "why", "cause", "effect"]):
        scores["Analyze"] += 0.8

    # Basic who/what/when/where—if no other signal—leans Remember/Understand
    if re.search(r"\b(who|what|when|where)\b", q):
        scores["Remember"] += 0.4
        scores["Understand"] += 0.2

    return scores
|
160 |
-
|
161 |
-
def classify_question(question: str) -> Tuple[str, Dict[str, float], str]:
    """Predict a Bloom level for *question*.

    Returns (best_level, per-level scores, short rationale string);
    blank input yields ("", {}, <hint message>).
    """
    if not question or not question.strip():
        return "", {}, "Please enter a question to classify."
    scores = score_levels(question)
    # On exact ties, prefer the deeper (later-indexed) level so questions
    # are not under-classified.
    best_level = max(BLOOMS_LEVELS, key=lambda lvl: (scores[lvl], BLOOMS_LEVELS.index(lvl)))
    # Rationale cites the two strongest contributors.
    ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    leaders = ranked[:2]
    rationale = "Top signals → " + ", ".join(f"{name}: {value:.2f}" for name, value in leaders)
    return best_level, scores, rationale
|
171 |
-
|
172 |
-
def extract_topic_from_question(q: str) -> str:
    """Very lightweight topic guesser for rewrite; falls back to original content.

    Tries, in order: the phrase after about/on/of; the question minus a
    leading command verb; the raw question minus trailing punctuation.
    Relies on the module-level VERBS mapping (level -> verb list), which is
    not fully visible in this view — verify against the constants section.
    """
    text = normalize(q).rstrip("?.")

    # Prefer phrases after 'about/on/of'
    m = re.search(r"\b(?:about|on|of)\s+(.+)", text)
    if m:
        topic = m.group(1)
        # trim trailing question cues
        topic = re.sub(r"\b(why|how|what|when|where|who)\b.*$", "", topic).strip()
        if topic:
            return topic

    # Remove leading command verbs
    # Flatten all verb lists; longest-first alternation so e.g. "summarize"
    # is tried before a shorter verb that happens to be its prefix.
    command_verbs = sum(VERBS.values(), [])
    pattern = r"^(" + "|".join(re.escape(v) for v in sorted(set(command_verbs), key=len, reverse=True)) + r")\b"
    text2 = re.sub(pattern, "", text).strip(",.:- ").strip()
    if text2:
        return text2

    # Fallback: keep original question minus punctuation
    return re.sub(r"[?.!]+$", "", q).strip()
|
194 |
-
|
195 |
-
def generate_questions(topic: str, level: str, n: int) -> str:
|
196 |
-
topic = topic.strip()
|
197 |
if not topic:
|
198 |
-
return "Please
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
|
213 |
-
|
214 |
-
|
215 |
-
" Provide step-by-step reasoning.",
|
216 |
-
]
|
217 |
-
|
218 |
-
attempts = 0
|
219 |
-
while len(out) < n and attempts < n * 10:
|
220 |
-
attempts += 1
|
221 |
-
template = random.choice(templates)
|
222 |
-
extra = random.choice(extras)
|
223 |
-
q = (template.format(topic=topic) + extra).strip()
|
224 |
-
if q not in used:
|
225 |
-
used.add(q)
|
226 |
-
out.append(q)
|
227 |
-
|
228 |
-
# Ensure numbering and distinctness
|
229 |
-
lines = [f"{i+1}. {q}" for i, q in enumerate(out)]
|
230 |
-
return "\n".join(lines)
|
231 |
-
|
232 |
-
def rewrite_question_to_level(question: str, new_level: str) -> str:
|
233 |
-
question = question.strip()
|
234 |
if not question:
|
235 |
-
return "
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
# avoid extremely specific ones when the source already contains constraints
|
244 |
-
generic_first = sorted(candidates, key=lambda s: len(s))
|
245 |
-
template = generic_first[0] if generic_first else "{topic}"
|
246 |
-
|
247 |
-
rewritten = template.format(topic=topic_guess)
|
248 |
-
# Add helpful adaptation notes for higher levels
|
249 |
-
if new_level in ["Analyze", "Evaluate", "Create"]:
|
250 |
-
# Encourage depth without changing grading rubric here
|
251 |
-
rewritten += " Be explicit about your reasoning."
|
252 |
-
|
253 |
-
return rewritten
|
254 |
-
|
255 |
-
# ---- Gradio UI ---------------------------------------------------------------

# Seed questions for the Classify tab's gr.Examples widget; each inner
# single-element list is one input row for the question textbox.
EXAMPLES = [
    ["What is photosynthesis and when does it occur?",],
    ["Design a simple experiment to test the effect of sunlight on plant growth.",],
    ["Compare and contrast arrays and linked lists.",],
    ["Justify whether encryption should be mandatory on consumer devices.",],
]
|
263 |
-
|
264 |
-
with gr.Blocks(title="Bloom's Taxonomy Helper", theme=gr.themes.Soft()) as demo:
|
265 |
-
gr.Markdown(
|
266 |
-
"""
|
267 |
-
# Bloom’s Taxonomy Helper
|
268 |
-
A compact tool to **classify**, **generate**, and **rewrite** exam questions by Bloom’s level:
|
269 |
-
**Remember · Understand · Apply · Analyze · Evaluate · Create**
|
270 |
-
"""
|
271 |
)
|
|
|
|
|
272 |
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
gr.Markdown("### Paste or type a question. I’ll guess its Bloom’s level.")
|
277 |
-
q_in = gr.Textbox(label="Question", placeholder="e.g., Compare and contrast stack and queue operations.", lines=3)
|
278 |
-
classify_btn = gr.Button("Classify")
|
279 |
-
level_out = gr.Label(label="Predicted Bloom’s level")
|
280 |
-
scores_out = gr.JSON(label="Scores by level (higher = stronger signal)")
|
281 |
-
rationale_out = gr.Markdown()
|
282 |
-
|
283 |
-
def on_classify(q):
|
284 |
-
lvl, scores, rationale = classify_question(q)
|
285 |
-
# Format Label expects mapping; we provide {'label': score}
|
286 |
-
if lvl:
|
287 |
-
return {lvl: 1.0}, scores, f"**Rationale:** {rationale}"
|
288 |
-
else:
|
289 |
-
return {}, {}, rationale
|
290 |
-
|
291 |
-
classify_btn.click(on_classify, inputs=[q_in], outputs=[level_out, scores_out, rationale_out])
|
292 |
-
|
293 |
-
gr.Examples(
|
294 |
-
examples=EXAMPLES,
|
295 |
-
inputs=[q_in],
|
296 |
-
label="Try examples",
|
297 |
-
)
|
298 |
-
|
299 |
-
# --- Tab 2: Generate ---------------------------------------------------
|
300 |
-
with gr.TabItem("Generate"):
|
301 |
-
gr.Markdown("### Enter a topic, choose a Bloom’s level, and how many questions to create.")
|
302 |
-
topic_in = gr.Textbox(label="Topic", placeholder="e.g., Dynamic programming, photosynthesis, supply and demand", lines=2)
|
303 |
-
level_in = gr.Dropdown(choices=BLOOMS_LEVELS, value="Understand", label="Bloom’s level")
|
304 |
-
num_in = gr.Slider(1, 20, value=5, step=1, label="Number of questions")
|
305 |
-
gen_btn = gr.Button("Generate")
|
306 |
-
gen_out = gr.Textbox(label="Generated Questions", lines=12)
|
307 |
-
|
308 |
-
gen_btn.click(
|
309 |
-
fn=lambda topic, level, n: generate_questions(topic, level, int(n)),
|
310 |
-
inputs=[topic_in, level_in, num_in],
|
311 |
-
outputs=[gen_out],
|
312 |
-
)
|
313 |
-
|
314 |
-
# --- Tab 3: Rewrite ----------------------------------------------------
|
315 |
-
with gr.TabItem("Rewrite"):
|
316 |
-
gr.Markdown("### Paste a question and choose a new Bloom’s level. I’ll rewrite it for that cognitive level.")
|
317 |
-
rewrite_in = gr.Textbox(label="Original Question", placeholder="e.g., List the steps of cellular respiration.", lines=3)
|
318 |
-
rewrite_level = gr.Dropdown(choices=BLOOMS_LEVELS, value="Analyze", label="Target Bloom’s level")
|
319 |
-
rewrite_btn = gr.Button("Rewrite")
|
320 |
-
rewrite_out = gr.Textbox(label="Rewritten Question", lines=5)
|
321 |
-
|
322 |
-
rewrite_btn.click(
|
323 |
-
fn=rewrite_question_to_level,
|
324 |
-
inputs=[rewrite_in, rewrite_level],
|
325 |
-
outputs=[rewrite_out],
|
326 |
-
)
|
327 |
-
|
328 |
gr.Markdown(
|
329 |
-
""
|
330 |
-
|
331 |
-
**Notes**
|
332 |
-
- This app uses lightweight heuristics (keyword signals and structure cues) rather than a trained model.
|
333 |
-
- Generation ensures questions are distinct and clearly numbered.
|
334 |
-
- Rewriting keeps the topic while shifting cognitive demand to the selected Bloom’s level.
|
335 |
-
"""
|
336 |
)
|
337 |
|
338 |
-
|
339 |
-
|
340 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
|
3 |
+
|
4 |
+
TITLE = "Bloom’s Taxonomy Helper (Classify + Generate + Rewrite)"

# ------------------ Bloom labels ------------------
# Candidate labels for zero-shot classification, ordered shallow -> deep.
LABELS = ["Remember", "Understand", "Apply", "Analyze", "Evaluate", "Create"]

# ------------------ Generation templates by Bloom ------------------
# Each template expects .format(n=<int>, topic=<str>) before being sent to
# the text2text generator.
PROMPT_TEMPLATES = {
    "Remember": (
        "Write {n} distinct factual recall questions for college students.\n"
        "Topic: {topic}\n"
        "Cognitive focus: Remember (recognize/recall facts and terms).\n"
        "Style: Use 'What', 'When', 'Define', or 'List'.\n"
        "Do not include answers. Number each on its own line."
    ),
    "Understand": (
        "Write {n} distinct comprehension questions for college students.\n"
        "Topic: {topic}\n"
        "Cognitive focus: Understand (explain, summarize, interpret).\n"
        "Style: Use 'Explain', 'Summarize', 'Describe', or 'Give an example of'.\n"
        "Do not include answers. Number each on its own line."
    ),
    "Apply": (
        "Write {n} distinct application questions for college students.\n"
        "Topic: {topic}\n"
        "Cognitive focus: Apply (use procedures, compute, demonstrate use).\n"
        "Style: Ask students to calculate, implement, or solve with concrete data.\n"
        "Avoid 'define' or 'explain' prompts. Do not include answers. Number each on its own line."
    ),
    "Analyze": (
        "Write {n} distinct analysis questions for college students.\n"
        "Topic: {topic}\n"
        "Cognitive focus: Analyze (compare/contrast, break down relationships, cause–effect).\n"
        "Style: Use 'Compare', 'Differentiate', 'Explain why', 'Break down', or 'Trace'.\n"
        "Do not include answers. Number each on its own line."
    ),
    "Evaluate": (
        "Write {n} distinct evaluation questions for college students.\n"
        "Topic: {topic}\n"
        "Cognitive focus: Evaluate (judge, critique, justify with criteria and evidence).\n"
        "Style: Use 'Argue', 'Defend', 'Critique', 'Which is better and why', or 'Assess'.\n"
        "Do not include answers. Number each on its own line."
    ),
    "Create": (
        "Write {n} distinct creation/synthesis tasks for college students.\n"
        "Topic: {topic}\n"
        "Cognitive focus: Create (design, plan, invent, propose, produce a novel artifact).\n"
        "Style: Use 'Design', 'Propose', 'Develop', 'Compose', or 'Build'. Include realistic constraints.\n"
        "Do not include answers. Number each on its own line."
    ),
}
|
54 |
|
55 |
+
# ------------------ Pipelines ------------------
# Zero-shot classifier (no training)
# NOTE: both pipelines are constructed at import time, so model weights are
# downloaded/loaded when the app starts (first launch may be slow).
clf = pipeline("zero-shot-classification", model="typeform/distilbert-base-uncased-mnli")

# Higher-quality generator (CPU-friendly but better than *small*)
GEN_MODEL = "google/flan-t5-base"
gen_tok = AutoTokenizer.from_pretrained(GEN_MODEL)
gen_mdl = AutoModelForSeq2SeqLM.from_pretrained(GEN_MODEL)
# Shared text2text pipeline used by both the Generate and Rewrite tabs.
gen = pipeline("text2text-generation", model=gen_mdl, tokenizer=gen_tok)
|
64 |
+
|
65 |
+
# ------------------ Helpers ------------------
|
66 |
+
def clean_numbering(text: str, n: int) -> str:
    """Normalize model output into clean "1. ..." lines, at most *n* of them.

    Steps: drop blank lines and bullet markers; if the model returned one
    un-numbered paragraph, split it on sentence ends; discard lines with no
    letters; truncate to n; renumber 1..n.
    """
    import re  # local import: this module otherwise only imports gradio/transformers

    lines = [ln.strip(" -*\t") for ln in text.splitlines() if ln.strip()]
    # If model returned a paragraph, split by sentence end
    if len(lines) == 1 and "1." not in lines[0]:
        lines = [p.strip() for p in re.split(r"(?<=[.?!])\s+", lines[0]) if p.strip()]
    # Keep only lines containing at least one letter (drops stray numbers/punctuation).
    lines = [ln for ln in lines if any(c.isalpha() for c in ln)]
    lines = lines[:max(1, n)]
    # Strip only a real numbering prefix ("3." / "3)") before renumbering.
    # The previous lstrip('0123456789. ') also mangled questions that
    # legitimately begin with a digit, e.g. "2x + 1 = 5" -> "x + 1 = 5".
    stripped = [re.sub(r"^\d+\s*[.)]\s*", "", ln).strip() for ln in lines]
    return "\n".join(f"{i+1}. {ln}" for i, ln in enumerate(stripped))
|
77 |
+
|
78 |
+
# ------------------ Functions ------------------
|
79 |
+
def classify_bloom(question: str):
    """Zero-shot classify *question* against the Bloom levels.

    Returns (top_label, score_table): score_table is one "Label: score" line
    per level, ordered best-first. Blank input returns ("", "").
    """
    text = (question or "").strip()
    if not text:
        return "", ""
    result = clf(text, LABELS)
    ranked_labels = result["labels"]
    ranked_scores = [round(float(score), 3) for score in result["scores"]]
    best = ranked_labels[0] if ranked_labels else ""
    rows = []
    for label, score in zip(ranked_labels, ranked_scores):
        rows.append(f"{label}: {score}")
    return best, "\n".join(rows)
|
89 |
+
|
90 |
+
def generate_questions(topic: str, level: str, n: int, creativity: float):
    """Generate *n* Bloom-aligned questions about *topic* via the seq2seq pipeline.

    *creativity* maps to sampling temperature (clamped to [0.01, 1.2]); values
    near zero disable sampling entirely (greedy decoding).
    """
    topic = (topic or "").strip()
    if not topic:
        return "Please enter a topic."
    count = int(n)
    prompt_template = PROMPT_TEMPLATES.get(level, PROMPT_TEMPLATES["Understand"])
    prompt = prompt_template.format(n=count, topic=topic)

    # Clamp temperature; only sample when creativity is meaningfully above zero.
    sampling = creativity > 0.01
    temperature = min(1.2, creativity)
    if temperature < 0.01:
        temperature = 0.01
    generated = gen(
        prompt,
        max_new_tokens=180,
        do_sample=sampling,
        temperature=temperature,
        top_p=0.9,
        num_beams=1,
    )
    return clean_numbering(generated[0]["generated_text"], count)
|
107 |
+
|
108 |
+
def rewrite_level(question: str, target_level: str):
    """Rewrite *question* so it targets *target_level* on Bloom's taxonomy.

    Returns a single renumbered question, or a usage hint for empty input.
    """
    question = (question or "").strip()
    if not question:
        return "Paste a question to rewrite."
    # Leverage the template for target level to steer rewriting
    template = PROMPT_TEMPLATES.get(target_level, PROMPT_TEMPLATES["Understand"])
    # BUG FIX: the template contains {n}/{topic} placeholders; previously it
    # was embedded unformatted, leaking literal "{n}"/"{topic}" into the LLM
    # prompt. Format for a single question about the original question's content.
    level_instructions = template.format(n=1, topic=question)
    prompt = (
        f"{level_instructions}\n\n"
        f"Transform the following single question to match the level above. Keep it concise and do not include the answer.\n"
        f"Original: {question}\n"
        f"Return exactly 1 numbered question."
    )
    # Deterministic decoding: rewriting should be stable for the same input.
    out = gen(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
    return clean_numbering(out, 1)
|
122 |
|
123 |
+
# ------------------ UI ------------------
# Three-tab Gradio app; each button is wired directly to the corresponding
# module-level function above.
with gr.Blocks(title=TITLE) as demo:
    gr.Markdown(f"# {TITLE}")
    gr.Markdown(
        "Classify questions by Bloom level, generate new questions aligned to a level, "
        "and rewrite a question to a different level. Runs fully on open models."
    )

    # Tab 1: zero-shot classification of a pasted question.
    with gr.Tab("Classify"):
        q = gr.Textbox(
            label="Enter a question",
            lines=4,
            placeholder="e.g., Explain why randomized controlled trials reduce bias."
        )
        top = gr.Textbox(label="Predicted Bloom level", interactive=False)
        scores = gr.Textbox(label="All scores", interactive=False)
        gr.Button("Classify").click(classify_bloom, [q], [top, scores])

    # Tab 2: generate n questions for a topic at a chosen level.
    with gr.Tab("Generate"):
        with gr.Row():
            topic = gr.Textbox(label="Topic", value="binary numbers in computer science")
            level = gr.Dropdown(LABELS, value="Apply", label="Bloom level")
        with gr.Row():
            n = gr.Slider(1, 10, value=5, step=1, label="How many questions")
            creativity = gr.Slider(0.0, 1.2, value=0.6, step=0.1, label="Creativity (temperature)")
        out = gr.Textbox(label="Generated questions", lines=12)
        gr.Button("Generate").click(generate_questions, [topic, level, n, creativity], out)

    # Tab 3: rewrite an existing question to a different target level.
    with gr.Tab("Rewrite"):
        q2 = gr.Textbox(label="Original question", lines=4, value="Define binary number.")
        target = gr.Dropdown(LABELS, value="Analyze", label="Target Bloom level")
        out2 = gr.Textbox(label="Rewritten question", lines=4)
        gr.Button("Rewrite").click(rewrite_level, [q2, target], out2)

demo.launch()
|