Spaces:
Sleeping
Sleeping
updated project files
Browse files- app.py +173 -0
- requirements.txt +83 -0
- utils.py +295 -0
app.py
ADDED
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from utils import driver, driver_resource
|
3 |
+
|
4 |
+
# -----------------------------
|
5 |
+
# Callback for initial suggestion
|
6 |
+
# -----------------------------
|
7 |
+
def on_submit(age, background, interest):
    """Handle the first submission: fetch a study plan and reveal the result widgets."""
    # Ask the LLM pipeline for the workflow diagram, rationale, and outcome text.
    diagram, reason, outcome = driver(age, background, interest)

    shown = {"visible": True}
    hidden = {"visible": False}
    return (
        gr.update(value=diagram, **shown),   # studyflow_diagram
        gr.update(value=reason, **shown),    # topic_reason
        gr.update(value=outcome, **shown),   # topic_outcome
        gr.update(**hidden),                 # hide submit
        gr.update(**shown),                  # show feedback section
        gr.update(**shown),                  # show revised submission button
        gr.update(**shown),                  # show original section
        gr.update(**hidden),                 # hide revised section
        gr.update(**shown),                  # show resource button
    )
|
22 |
+
|
23 |
+
# -----------------------------
|
24 |
+
# Callback for revised suggestion based on feedback
|
25 |
+
# -----------------------------
|
26 |
+
def on_feedback(age, background, interest, userFeedback):
    """Handle a revised submission that folds in the user's free-text feedback."""
    # Re-run the LLM pipeline, this time passing the feedback along.
    diagram, reason, outcome = driver(age, background, interest, userFeedback)

    # First three outputs populate the revised widgets; the rest toggle visibility.
    updates = [gr.update(value=v, visible=True) for v in (diagram, reason, outcome)]
    updates.append(gr.update(visible=False))  # hide submitWithFeedback
    updates.append(gr.update(visible=False))  # hide original section
    updates.append(gr.update(visible=True))   # show revised section
    return tuple(updates)
|
38 |
+
|
39 |
+
# -----------------------------
|
40 |
+
# Helpers for dummy resource button
|
41 |
+
# -----------------------------
|
42 |
+
# def getResouces():
|
43 |
+
# return "Here are some resources to help you get started with your learning path."
|
44 |
+
|
45 |
+
def getResouces():
    """Fetch the formatted resources and grasp-check questions from the backend."""
    # driver_resource() already returns the (resources, questions) pair we need;
    # no point unpacking and repacking it.
    return driver_resource()
|
48 |
+
|
49 |
+
def on_Resource():
    """Populate the resource panel and swap the related buttons' visibility."""
    resource_text, question_text = driver_resource()

    hide = gr.update(visible=False)
    show = gr.update(visible=True)
    return (
        resource_text,
        question_text,
        show,  # Show resource section
        hide,  # Hide resource button
        hide,  # Hide feedback button
    )
|
58 |
+
|
59 |
+
# -----------------------------
|
60 |
+
# UI with Gradio Blocks
|
61 |
+
# -----------------------------
|
62 |
+
with gr.Blocks(css="""
#scrollable-md {
    max-height: 350px;
    border: 1px solid #999;
    overflow-y: auto;
}
#original_section{
    background-color: transparent !important;
}

""") as demo:
    # App header and feature highlights.
    gr.Markdown("# π LearnFlow")
    gr.Markdown("""πΉ **πΊοΈ Personalized Study Workflow** πΉ **π§ Meaningful Reasoning & Outcomes** πΉ **π Beginner-Friendly Resources** πΉ **β Grasp Check Questions** """)

    with gr.Row():
        # Left column: profile inputs plus the three action buttons.
        with gr.Column(scale=1):
            with gr.Group(elem_id="input-section", visible=True):
                age = gr.Number(label="πΆ Your Age", value=18)
                background = gr.Textbox(label="π Your Educational Background")
                interest = gr.Textbox(label="π‘ Your Interests")

            submit = gr.Button("π Suggest What to Learn", visible=True, elem_classes="gr-button")

            with gr.Group(elem_id="feedback-section", visible=False) as feedback_section:
                userFeedback = gr.Textbox(label="βΉοΈ Help us know what youβre looking for")
                submitWithFeeback = gr.Button("π Suggest What to Learn (Revised)", elem_classes="gr-button")

            resource_button = gr.Button("π Click to get Resource", visible=False, elem_classes="gr-button")

        # Right column: revised/original recommendations and the resource panel.
        with gr.Column(scale=2):
            # REVISED Section (initially hidden)
            with gr.Group(visible=False) as revised_section:
                gr.Markdown("### π Revised Recommendation")
                with gr.Row():
                    revisedStudyflow_diagram = gr.Markdown(
                        value="<p>No recommendation yet.</p>",
                        label="π The flow you are looking for",
                        elem_id="scrollable-md",
                        visible=False,
                    )
                    with gr.Column():
                        revisedTopic_reason = gr.Textbox(label="π§ Why this topic", lines=5, max_lines=5, visible=False)
                        revisedTopic_outcome = gr.Textbox(label="πͺ Outcome of this topic", lines=5, max_lines=5, visible=False)

            # ORIGINAL Section (initially visible)
            with gr.Group(visible=True) as original_section:
                gr.Markdown("### π Recommendation")
                with gr.Row():
                    studyflow_diagram = gr.Markdown(
                        value="<p>No recommendation yet.</p>",
                        label="π The flow you are looking for",
                        elem_id="scrollable-md",
                        visible=False,
                    )
                    with gr.Column():
                        topic_reason = gr.Textbox(label="π§ Why this topic", lines=5, max_lines=5, visible=False)
                        topic_outcome = gr.Textbox(label="πͺ Outcome of this topic", lines=5, max_lines=5, visible=False)

            # RESOURCES Section (initially hidden)
            with gr.Group(elem_id="resource-section", visible=False) as resource_section:
                gr.Markdown("### π Learning Resources")
                learningResource = gr.Textbox(label="Resources to help you get started")
                graspCheck = gr.Textbox(label="Grasp Check Questions")

    # ----------- Events ------------

    # First submission: generate and reveal the original recommendation.
    submit.click(
        fn=on_submit,
        inputs=[age, background, interest],
        outputs=[
            studyflow_diagram,
            topic_reason,
            topic_outcome,
            submit,
            feedback_section,
            submitWithFeeback,
            original_section,
            revised_section,
            resource_button,
        ],
        queue=True,
    )

    # Second pass: regenerate with the user's feedback folded in.
    submitWithFeeback.click(
        fn=on_feedback,
        inputs=[age, background, interest, userFeedback],
        outputs=[
            revisedStudyflow_diagram,
            revisedTopic_reason,
            revisedTopic_outcome,
            submitWithFeeback,
            original_section,
            revised_section,
        ],
        queue=True,
    )

    # Final step: surface resources and grasp-check questions.
    resource_button.click(
        fn=on_Resource,
        inputs=[],
        outputs=[
            learningResource,
            graspCheck,
            resource_section,
            resource_button,
            submitWithFeeback,
        ],
        queue=True,
    )

# Launch the app
demo.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiofiles==24.1.0
|
2 |
+
annotated-types==0.7.0
|
3 |
+
anyio==4.9.0
|
4 |
+
asttokens==3.0.0
|
5 |
+
audioop-lts==0.2.1
|
6 |
+
certifi==2025.4.26
|
7 |
+
charset-normalizer==3.4.2
|
8 |
+
click==8.2.1
|
9 |
+
colorama==0.4.6
|
10 |
+
contourpy==1.3.2
|
11 |
+
cycler==0.12.1
|
12 |
+
decorator==5.2.1
|
13 |
+
distro==1.9.0
|
14 |
+
dotenv==0.9.9
|
15 |
+
executing==2.2.0
|
16 |
+
fastapi==0.115.12
|
17 |
+
ffmpy==0.6.0
|
18 |
+
filelock==3.18.0
|
19 |
+
fonttools==4.58.2
|
20 |
+
fsspec==2025.5.1
|
21 |
+
gradio==5.33.0
|
22 |
+
gradio-client==1.10.2
|
23 |
+
groovy==0.1.2
|
24 |
+
h11==0.16.0
|
25 |
+
httpcore==1.0.9
|
26 |
+
httpx==0.28.1
|
27 |
+
huggingface-hub==0.32.4
|
28 |
+
idna==3.10
|
29 |
+
ipython==9.3.0
|
30 |
+
ipython-pygments-lexers==1.1.1
|
31 |
+
jedi==0.19.2
|
32 |
+
jinja2==3.1.6
|
33 |
+
jiter==0.10.0
|
34 |
+
jsonpickle==4.1.1
|
35 |
+
kiwisolver==1.4.8
|
36 |
+
markdown-it-py==3.0.0
|
37 |
+
markupsafe==3.0.2
|
38 |
+
matplotlib==3.10.3
|
39 |
+
matplotlib-inline==0.1.7
|
40 |
+
mdurl==0.1.2
|
41 |
+
networkx==3.5
|
42 |
+
numpy==2.2.6
|
43 |
+
openai==1.84.0
|
44 |
+
orjson==3.10.18
|
45 |
+
packaging==25.0
|
46 |
+
pandas==2.3.0
|
47 |
+
parso==0.8.4
|
48 |
+
pillow==11.2.1
|
49 |
+
prompt-toolkit==3.0.51
|
50 |
+
pure-eval==0.2.3
|
51 |
+
pydantic==2.11.5
|
52 |
+
pydantic-core==2.33.2
|
53 |
+
pydub==0.25.1
|
54 |
+
pygments==2.19.1
|
55 |
+
pyparsing==3.2.3
|
56 |
+
python-dateutil==2.9.0.post0
|
57 |
+
python-dotenv==1.1.0
|
58 |
+
python-multipart==0.0.20
|
59 |
+
pytz==2025.2
|
60 |
+
pyvis==0.3.2
|
61 |
+
pyyaml==6.0.2
|
62 |
+
requests==2.32.3
|
63 |
+
rich==14.0.0
|
64 |
+
ruff==0.11.13
|
65 |
+
safehttpx==0.1.6
|
66 |
+
semantic-version==2.10.0
|
67 |
+
shellingham==1.5.4
|
68 |
+
six==1.17.0
|
69 |
+
sniffio==1.3.1
|
70 |
+
stack-data==0.6.3
|
71 |
+
starlette==0.46.2
|
72 |
+
temp==2020.7.2
|
73 |
+
tomlkit==0.13.3
|
74 |
+
tqdm==4.67.1
|
75 |
+
traitlets==5.14.3
|
76 |
+
typer==0.16.0
|
77 |
+
typing-extensions==4.14.0
|
78 |
+
typing-inspection==0.4.1
|
79 |
+
tzdata==2025.2
|
80 |
+
urllib3==2.4.0
|
81 |
+
uvicorn==0.34.3
|
82 |
+
wcwidth==0.2.13
|
83 |
+
websockets==15.0.1
|
utils.py
ADDED
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
from openai import OpenAI
|
4 |
+
from typing import List, Dict, Union
|
5 |
+
from pydantic import BaseModel, ValidationError
|
6 |
+
|
7 |
+
# === OpenAI Client Initialization ===
# SambaNova exposes an OpenAI-compatible endpoint; the API key is read from
# the environment (SAMBANOVA_KEY).
client = OpenAI(
    base_url="https://api.sambanova.ai/v1",
    api_key=os.getenv("SAMBANOVA_KEY"),
)

# === Sample Data for Testing ===
# Canned examples of each pipeline output, handy for offline/manual testing.
sample_studyflow = {'Machine Learning Fundamentals': ['Introduction to Machine Learning', 'Types of Machine Learning', 'Model Evaluation Metrics', 'Overfitting and Underfitting'], 'Deep Learning with Python': ['Introduction to Neural Networks', 'Convolutional Neural Networks (CNNs)', 'Recurrent Neural Networks (RNNs)', 'Transfer Learning'], 'Large Language Models': ['Introduction to Natural Language Processing (NLP)', 'Language Model Architectures', 'Transformers and Attention Mechanisms', 'Fine-Tuning Pre-Trained Models'], 'Pattern Recognition': ['Introduction to Pattern Recognition', 'Supervised and Unsupervised Learning', 'Clustering Algorithms', 'Dimensionality Reduction Techniques'], 'Model Deployment': ['Introduction to Model Deployment', 'Model Serving and Monitoring', 'Containerization with Docker', 'Cloud Deployment Options']}
sample_reason = "Given your background in computer science engineering and interests in machine learning, large language models, and pattern recognition, this plan dives into the fundamentals of machine learning and deep learning, with a focus on practical applications in Python."
sample_outcome = "After completing this plan, you will be able to design, train, and deploy machine learning models, including large language models, and apply pattern recognition techniques to real-world problems. You will also understand how to evaluate and fine-tune your models for optimal performance."
sample_resource = ['Machine Learning Crash Course - YouTube by Google Developers', 'Deep Learning with Python - Book by FranΓ§ois Chollet', 'Natural Language Processing with Python - Book by Steven Bird, Ewan Klein, and Edward Loper']

# Module-level cache: driver() fills these slots so that driver_resource()
# can build resources/questions later without re-calling the LLM.
_cached_data = dict(
    reason=sample_reason,
    expected_outcome=None,
    resources=None,
)
|
23 |
+
# === Connector to Frontend ===
def driver(age: int, background: str, interest: str, feedback: Union[str, None] = None):
    """Run the suggestion pipeline and return (diagram, reason, expected_outcome).

    Also stashes reason/outcome/resources in `_cached_data` for the later
    resource/grasp-check step.
    """
    # NOTE(review): the status element is discarded; a "clarify"/"error"
    # result arrives as a plain string and would fail on attribute access
    # below -- confirm the intended handling.
    _, plan = get_learning_suggestion(client, age, background, interest, feedback)

    # Save the response in the cache for driver_resource().
    for key, value in (
        ("reason", plan.reason),
        ("expected_outcome", plan.expected_outcome),
        ("resources", plan.resources),
    ):
        _cached_data[key] = value

    diagram = get_studyflow_diagram(plan.study_workflow)
    return diagram, plan.reason, plan.expected_outcome
|
38 |
+
|
39 |
+
def driver_resource():
    """Build display text for resources and grasp-check questions.

    Reads the plan pieces cached by driver(), asks the LLM for comprehension
    questions, and renders both lists as plain text for the Gradio textboxes.

    Returns:
        tuple[str, str]: (bulleted resource list, newline-joined questions).
    """
    reason = _cached_data["reason"]
    expected_outcome = _cached_data["expected_outcome"]
    resources = _cached_data["resources"]

    # NOTE(review): these cache slots start as None until driver() has run;
    # the UI only exposes the resource button after a submission, so this
    # should be safe -- confirm if the flow ever changes.
    questions = build_grasp_check(reason, expected_outcome, resources)

    # One bullet per resource; questions are already complete lines
    # (the original enumerate index was never used, so it is dropped).
    formated_resources = "\n".join(f"- {resource}" for resource in resources)
    formated_questions = "\n".join(questions)

    return formated_resources, formated_questions
|
51 |
+
|
52 |
+
# === Pydantic Models for Structured Output ===
class StudyPlan(BaseModel):
    """Validated shape of a complete learning-plan response from the LLM."""
    # Main topic -> ordered list of subtopics (beginner to advanced).
    study_workflow: Dict[str, List[str]]
    # Why this path fits the user's profile.
    reason: str
    # What the user should be able to do after finishing the plan.
    expected_outcome: str
    # Beginner-friendly learning materials.
    resources: List[str]
|
58 |
+
|
59 |
+
class ClarificationRequest(BaseModel):
    """Shape returned when the model needs more detail instead of a plan."""
    # Single follow-up question to show the user.
    follow_up_question: str
|
61 |
+
|
62 |
+
class GraspCheck(BaseModel):
    """Shape wrapping the list of comprehension-check questions."""
    # Each entry is one short question string.
    questions: List[str]
|
64 |
+
|
65 |
+
# === System Prompt ===
# Persona for every study-plan request; the detailed formatting contract
# lives in build_user_prompt() below.
SYSTEM_PROMPT = """
You are a smart educational guide agent.
You help people figure out what to learn next based on their age, background, and interests.
Be adaptive: if input is too vague, ask for clarification. If it's clear, give them:
1. A study_workflow - a roadmap of topics and subtopics.
2. A reason why it's the right path for the user.
3. An expected outcome after finishing this learning path.
4. Beginner-friendly resources.
Use simple and clear language.
Always respond in strict JSON.
"""
|
77 |
+
|
78 |
+
# === User Prompt Generator ===
def build_user_prompt(age, background, interest, feedback=None):
    """Assemble the per-request user prompt.

    Args:
        age: User's age.
        background: Free-text educational background.
        interest: Free-text interests.
        feedback: Optional feedback from a previous round; when present it is
            appended to the profile section.

    Returns:
        str: A prompt instructing the model to emit strict JSON matching
        either the StudyPlan or the ClarificationRequest shape.
    """
    feedback_note = f"\n- Additional Feedback from User: {feedback}" if feedback else ""
    # NOTE: literal braces in the JSON examples are doubled ({{ }}) because
    # this is an f-string.
    return f"""
You are an expert curriculum advisor.

### User Profile
- Age: {age}
- Educational Background: {background}
- Interests: {interest}{feedback_note}

### Your Task:
Generate a structured learning plan in **strict JSON format only**, without any extra text or markdown.

Your output must include:
1. **study_workflow**: a Python-style dictionary (JSON-safe)
- Keys: main topics relevant to the user's profile
- Values: 2β5 subtopics per main topic, written as a list of strings ordered from beginner to advanced

2. **reason**: 3-4 clear sentence explaining "why" this path fits the user's background and interests. Avoid overly technical or overly vague language. Match the tone to their background.
3. **expected_outcome**: 3β4 sentences describing what the user will *be able to do* by the end. Be specific, realistic, and motivating. Avoid overly technical or overly vague language. Match the tone to their background.
4. **resources**: list of 3β4 beginner-friendly materials

### VERY IMPORTANT:
- Talk directly to the user, not in third person.
- Do NOT return more than 5 main topics
- Do NOT return more than 5 subtopics per main topic
- Do NOT return more than 3 resources
- Do NOT include explanations outside the JSON
- Do NOT use markdown code blocks like ```json
- Only output valid JSON

### Output Example:
{{
"study_workflow": {{
"Start with Python": ["Variables and Data Types", "Loops", "Functions", "Error Handling"],
"Data Structures": ["Lists", "Dictionaries", "Tuples", "Sets"],
"NumPy": ["Arrays", "Array Operations", "Broadcasting"],
"Pandas": ["Series and DataFrames", "Filtering and Sorting", "Basic Data Cleaning"],
"Matplotlib": ["Line Charts", "Bar Charts", "Histograms"]
}},
"reason": "Since you are new to programming and interested in data-related topics, this plan starts with Python basics and gradually introduces tools used in real data analysis projects.",
"expected_outcome": "After completing this plan, you will understand the fundamentals of Python and be able to explore and analyze real-world datasets using tools like Pandas and Matplotlib. You wil be able to write small scripts to automate tasks, clean data, and create visual summaries.",
"resources": [
"Python for Beginners - YouTube by freeCodeCamp",
"CS50βs Introduction to Computer Science",
"Kaggle: Python Course"
]
}}

### If the user profile is too vague to proceed:
Return this JSON instead:
{{
"follow_up_question": "Ask a specific question to clarify what the user needs"
}}
"""
|
134 |
+
|
135 |
+
# === GPT Driver ===
def get_learning_suggestion(client, age, background, interest, feedback=None):
    """Request a learning plan from the LLM and validate its JSON payload.

    Args:
        client: OpenAI-compatible client used for the chat completion.
        age: User's age.
        background: Free-text educational background.
        interest: Free-text interests.
        feedback: Optional feedback to fold into the prompt.

    Returns:
        tuple: one of
            ("complete", StudyPlan)        -- a full validated plan,
            ("clarify", str)               -- a follow-up question,
            ("error", str)                 -- message for any failure.
    """
    user_prompt = build_user_prompt(age, background, interest, feedback)

    try:
        completion = client.chat.completions.create(
            model="Meta-Llama-3.1-405B-Instruct",
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt},
            ],
        )
        raw_response = completion.choices[0].message.content.strip()
        print("π Raw LLM Response:", raw_response)

        # Validate against expected schemas
        try:
            response_json = json.loads(raw_response)
        except json.JSONDecodeError as e:
            # Chain the decode error so the root cause survives into the
            # stringified message below (previously `e` was silently dropped).
            raise ValueError("Invalid JSON from model") from e

        if "follow_up_question" in response_json:
            follow_up = ClarificationRequest(**response_json)
            return "clarify", follow_up.follow_up_question

        study_plan = StudyPlan(**response_json)
        return "complete", study_plan

    except Exception as e:
        # Deliberate catch-all boundary: the UI layer expects a
        # (status, message) pair rather than a raised exception.
        print(f"β Error occurred: {e}")
        return "error", str(e)
|
166 |
+
|
167 |
+
# === Feedback Interpreter Layer ===
def interpret_feedback(feedback_text):
    """Map known feedback phrases to canonical adjustment tags.

    Scans the lower-cased feedback for trigger phrases and returns the
    matching tags joined by ", "; unrecognized feedback passes through
    unchanged. (A small model could replace this rule-based classifier.)
    """
    phrase_to_tag = {
        "too advanced": "make it easier",
        "too basic": "increase complexity",
        "add resources": "expand resources",
        "missing outcome": "add outcome",
    }
    lowered = feedback_text.lower()

    tags = []
    for phrase, tag in phrase_to_tag.items():
        if phrase in lowered:
            tags.append(tag)

    if not tags:
        return feedback_text
    return ", ".join(tags)
|
178 |
+
|
179 |
+
def build_grasp_check(reason: str, outcome: str, resources: List[str]) -> List[str]:
    """
    Make an LLM request to generate 5-10 comprehension-check questions
    based on the user's reason, desired outcome, and resources.

    Returns the validated list of question strings, or [] when the reply
    cannot be parsed/validated.
    """
    # Create a prompt that instructs the model to write 5-10 questions.
    prompt = f"""
You are a helpful AI tutor. The user is learning because:
{reason}

Their desired outcome is:
{outcome}

They have these resources:
{resources}

Please generate 5 to 10 short questions that the user could answer
after studying these materials, to check their overal understanding.
Return only the list of questions.

"""

    # Make your LLM (OpenAI, SambaNova, etc.) request here:
    # (Change this to your actual function call and model parameters.)
    completion = client.chat.completions.create(
        model="Meta-Llama-3.1-405B-Instruct",
        messages=[
            {"role": "system", "content": "You are a question setter whoes objective is to test learners overall understanding of the topic, not specifics "},
            {"role": "user", "content": prompt},
        ],

    )

    # Get the raw text from the response
    response = completion.choices[0].message.content.strip()
    print("π Grasp Check Questions:", response)  # Debug print

    try:
        # The reply is treated as plain text: each non-blank line becomes one
        # question, with any leading/trailing "-"/space characters stripped.
        # NOTE(review): numbered lists ("1. ...") keep their numbers -- confirm
        # whether that is intended.
        questions_list = [line.strip("- ").strip() for line in response.splitlines() if line.strip()]

        # Validate with Pydantic
        validated = GraspCheck(questions=questions_list)
        return validated.questions

    except (ValidationError, json.JSONDecodeError) as e:
        # json.JSONDecodeError is listed defensively; no json.loads call
        # remains on this path.
        print("Error parsing or validating questions:", e)
        return []
|
229 |
+
|
230 |
+
# === Studyflow Preparation ===
def convert_studyflow_to_mermaid_text(studyflow):
    """
    Convert a structured learning workflow dictionary to step titles and details for
    Mermaid diagram generation.

    Args:
        studyflow: Mapping of main topic -> list of subtopic strings.

    Returns:
        tuple[str, str]: (comma-separated topic titles,
        " | "-separated groups of comma-separated subtopics, in the same order).
    """
    # Topics become node titles; each topic's subtopics collapse into one
    # comma-separated detail group. Built with direct joins instead of the
    # old append-then-join loop; the stray debug print is removed.
    step_titles = ", ".join(studyflow.keys())
    step_details = " | ".join(", ".join(subtopics) for subtopics in studyflow.values())
    return step_titles, step_details
|
249 |
+
|
250 |
+
|
251 |
+
def get_studyflow_diagram(studyflow):
    """Render a studyflow mapping as a Mermaid flowchart in a fenced block.

    Args:
        studyflow: Mapping of main topic -> list of subtopic strings.

    Returns:
        str: A ```mermaid fenced block with one styled node per topic
        (title plus bulleted subtopics), linked sequentially in insertion
        order.

    Fix: the old implementation serialized the dict to comma/pipe-joined
    strings (convert_studyflow_to_mermaid_text) and then re-split them, which
    corrupted any title or subtopic containing "," or "|". Walking the dict
    directly avoids the lossy round trip and the helper's debug print.
    """
    # Node palette; cycles via modulo when there are more topics than colors.
    colors = ["#f9c74f", "#90be6d", "#f9844a", "#577590", "#277da1", "#ff595e", "#ffd166"]

    mermaid_code = "graph TD;\n"
    previous_step = None

    for i, (title, subtopics) in enumerate(studyflow.items()):
        # Bullet list of subtopics rendered with HTML line breaks.
        bullet_points = "<br/>".join(f"β’ {subtopic}" for subtopic in subtopics)
        # Title and bullets combined into a single node label.
        node_text = f"<b><u>{title}</u></b><br/>{bullet_points}"
        mermaid_code += f" A{i}[\"{node_text}\"]\n"

        # Per-node fill color, cycling through the palette.
        color = colors[i % len(colors)]
        mermaid_code += f" style A{i} fill:{color},stroke:#333,stroke-width:1.5px;\n"

        # Chain nodes so the topics read as a sequential flow.
        if previous_step:
            mermaid_code += f" {previous_step} --> A{i}\n"
        previous_step = f"A{i}"

    return f"```mermaid\n{mermaid_code}\n```"
|