Update orchestrator.py
orchestrator.py (changed: +70 -21)
@@ -40,24 +40,36 @@ It is provided for Your reference after the user has uploaded a resource.
 This information is primarily for understanding the context of the user's resource.
 For the syllabus, you should provide either the raw data or a dynamic summary.\n"""

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def initialize_orchestrator_modules():
+    """
+    Instantiates all DSPy modules AFTER the LM has been configured.
+    This function must be called from the main app script.
+    """
+    global CONVO_MANAGER, SYLLABUS_ROUTER, INITIAL_RESOURCE_SUMMARIZER, DYNAMIC_SUMMARIZER_MODULE, \
+        LEARNING_STYLE_QUESTIONER, PERSONA_PROMPT_GENERATOR, EXPLAINER_MODULE, SYLLABUS_FEEDBACK_REQUESTER, \
+        SYLLABUS_XML_TO_MARKDOWN_FORMATTER, TITLE_GENERATOR_PREDICTOR
+
+    if not dspy.settings.lm:
+        logger.error("Cannot initialize orchestrator modules: DSPy LM is not configured.")
+        return False
+
+    try:
+        CONVO_MANAGER = ConversationManager()
+        SYLLABUS_ROUTER = SyllabusGeneratorRouter()
+        INITIAL_RESOURCE_SUMMARIZER = InitialResourceSummarizer()
+        DYNAMIC_SUMMARIZER_MODULE = DynamicResourceSummarizerModule()
+        LEARNING_STYLE_QUESTIONER = LearningStyleQuestioner()
+        PERSONA_PROMPT_GENERATOR = PersonaPromptGenerator()
+        EXPLAINER_MODULE = ExplainerModule()
+        SYLLABUS_FEEDBACK_REQUESTER = dspy.Predict(SyllabusFeedbackRequestSignature, temperature=0.7)
+        SYLLABUS_XML_TO_MARKDOWN_FORMATTER = dspy.Predict(FormatSyllabusXMLToMarkdown, temperature=0.3)
+        TITLE_GENERATOR_PREDICTOR = dspy.Predict(TitleGenerationSignature, temperature=0.4)
+        logger.info("Orchestrator's DSPy modules initialized successfully.")
+        return True
+    except Exception as e:
+        logger.critical(f"A critical error occurred during orchestrator module initialization: {e}", exc_info=True)
+        return False
+

 # --- Helper functions and the main process_chat_message function follow below ---
 # (The rest of your file remains the same)
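Note (not part of this commit): a minimal sketch of how the main app script might configure the LM and then call the new initializer. The configuration call and model name are assumptions based on DSPy's standard API, not taken from this repository.

import dspy
import orchestrator  # the module changed in this commit

# Placeholder model name; substitute whatever LM the app actually configures.
dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))

if not orchestrator.initialize_orchestrator_modules():
    raise RuntimeError("Orchestrator DSPy modules failed to initialize; check the logs.")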
@@ -138,6 +150,11 @@ def process_chat_message(
     Processes user message using DSPy modules.
     Handles initial resource processing if `uploaded_resource_data` is provided.
     """
+
+    # def yield_feedback(state_to_update: Dict, feedback_key: str):
+    #     feedback_state = state_to_update.copy()
+    #     feedback_state[STATE_UI_FEEDBACK_MESSAGE] = UI_FEEDBACK_MAP.get(feedback_key, "Processing...")
+    #     return feedback_state
     if not CONVO_MANAGER:
         logger.error("Orchestrator's DSPy modules are not initialized. Cannot process message.")
         # Return an error state immediately
@@ -145,7 +162,8 @@ def process_chat_message(
         error_state[STATE_STAGE] = STAGE_ERROR
         error_state[STATE_HISTORY].append({'role': 'user', 'parts': [{'text': user_message_text}]})
         error_state[STATE_HISTORY].append({'role': 'model', 'parts': [{'text': "[FATAL ERROR: AI modules not initialized. Please contact support.]"}]})
-
+        yield ("final_result", error_state)
+        return


     new_state = current_session_state.copy()
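For reference (not part of the commit): because process_chat_message now contains yield statements, it runs as a generator, and the early-exit path above emits a single ("final_result", ...) event before the bare return ends iteration. A minimal standalone illustration of that pattern:

from typing import Any, Iterator, Tuple

def _early_exit_demo(ok: bool) -> Iterator[Tuple[str, Any]]:
    # Same shape as the guard above: one final_result event, then stop.
    if not ok:
        yield ("final_result", {"stage": "ERROR"})
        return  # ends the generator; the caller's for-loop simply finishes
    yield ("status", "PROCESSING_INPUT")
    yield ("final_result", {"stage": "DONE"})

print(list(_early_exit_demo(False)))  # [('final_result', {'stage': 'ERROR'})]
print(list(_early_exit_demo(True)))   # [('status', 'PROCESSING_INPUT'), ('final_result', {'stage': 'DONE'})]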
@@ -169,7 +187,10 @@ def process_chat_message(
     # --- Initial Resource Processing (only if resources are provided AND it's the start of negotiation) ---
     # Resources can only be uploaded at the start.
     if stage == STAGE_START and uploaded_resource_data:
+        yield ("status", "ANALYZING_RESOURCES_INITIAL")
+
         logger.info("First turn with resources. Processing them now...")
+

         total_chars = sum(len(text) for text in uploaded_resource_data.values())

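For context (not part of the commit): the sum above implies uploaded_resource_data is a mapping whose values are the extracted text of each upload; the keys and sample values below are only an assumed shape.

# Assumed shape: resource name -> extracted text.
uploaded_resource_data = {
    "lecture_notes.pdf": "Chapter 1: ...",
    "syllabus_draft.txt": "Week 1: ...",
}
total_chars = sum(len(text) for text in uploaded_resource_data.values())
print(total_chars)  # combined character count, presumably used to choose raw data vs. dynamic summaries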
@@ -218,6 +239,7 @@ def process_chat_message(
     if stage == STAGE_START:
         new_state[STATE_STAGE] = STAGE_NEGOTIATING
         stage = STAGE_NEGOTIATING  # Update local stage variable
+

     logger.info(f"Orchestrator (DSPy): Stage={stage}. Calling ConversationManager.")

@@ -228,6 +250,8 @@ def process_chat_message(

     # Get resource overview from state if set, otherwise "None"
     resource_overview_for_manager = new_state.get(STATE_RESOURCE_SUMMARY_OVERVIEW, "No resources were processed or provided by the user for this session.")
+    # yield yield_feedback(new_state, "PROCESSING_INPUT")
+    yield ("status", "PROCESSING_INPUT")

     action_code, display_text = CONVO_MANAGER.forward(
         conversation_history_str=history_str,
@@ -245,6 +269,9 @@ def process_chat_message(

     # --- Handle Actions from ConversationManager ---
     if action_code in ["GENERATE", "MODIFY"]:
+        yield ("status", "GENERATING_SYLLABUS")
+
+
         task_type_str = "generation" if action_code == "GENERATE" else "modification"
         logger.info(f"Syllabus {task_type_str} requested. Resource type: {new_state.get(STATE_RESOURCE_TYPE_FOR_SYLLABUS)}")
         retrieved_resource_type = new_state.get(STATE_RESOURCE_TYPE_FOR_SYLLABUS, "NONE")
@@ -257,6 +284,8 @@ def process_chat_message(
         logger.info(f"Syllabus {task_type_str} requested. Resource type from state: {retrieved_resource_type}")
         # If type is SUMMARIES, we need to generate them now using DynamicSummarizer
         if retrieved_resource_type == "SUMMARIES":
+            # yield yield_feedback(new_state, "GENERATING_DYNAMIC_SUMMARIES")
+
             raw_data_for_dynamic_summary = new_state.get('raw_resource_data_for_dynamic_summary')
             if raw_data_for_dynamic_summary and isinstance(raw_data_for_dynamic_summary, dict):
                 logger.info("Generating dynamic summaries for syllabus router...")
@@ -296,7 +325,11 @@ def process_chat_message(
         # --- BLOCK 1: XML to Markdown Formatting (and set success flag) ---
         if generated_xml and not generated_xml.strip().upper().startswith(("<SYLLABUS>\n[ERROR", "<SYLLABUS>[ERROR")):
             syllabus_generation_was_successful = True  # Mark initial generation as successful
+            yield ("status", "FORMATTING_SYLLABUS")
+
             logger.info(f"Syllabus XML generated. Length: {len(generated_xml)}. Attempting Markdown formatting.")
+            # yield yield_feedback(new_state, "FORMATTING_SYLLABUS")
+

             if SYLLABUS_XML_TO_MARKDOWN_FORMATTER:
                 try:
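For reference (not part of the commit), the guard above only skips formatting when the generated XML starts with an explicit error sentinel; the sample values below are illustrative only.

xml_ok = "<syllabus>\n  <module>Intro</module>\n</syllabus>"
xml_err = "<syllabus>[ERROR] generation failed"

for generated_xml in (xml_ok, xml_err):
    is_error = generated_xml.strip().upper().startswith(("<SYLLABUS>\n[ERROR", "<SYLLABUS>[ERROR"))
    print(is_error)  # False for xml_ok, True for xml_err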
@@ -370,6 +403,8 @@ def process_chat_message(
         logger.info("Finalization requested by manager.")
         last_syllabus_in_history = get_last_syllabus_content_from_history(history)
         if last_syllabus_in_history:
+            # yield yield_feedback(new_state, "FINALIZING_SYLLABUS")
+
             new_state[STATE_FINAL_SYLLABUS] = f"<syllabus>\n{last_syllabus_in_history}\n</syllabus>"  # Store it

             # Ask for learning style
@@ -384,9 +419,12 @@ def process_chat_message(
             history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})

     elif action_code == "PERSONA":
+        yield ("status", "GENERATING_PERSONA")
         logger.info("Persona generation triggered by manager.")
         final_syllabus_xml_str = new_state.get(STATE_FINAL_SYLLABUS)
         if final_syllabus_xml_str:
+            # yield yield_feedback(new_state, "GENERATING_PERSONA")
+
             logger.info("Generating explainer prompt body...")
             explainer_prompt_body = PERSONA_PROMPT_GENERATOR.forward(
                 conversation_history_str=format_history_for_dspy(history)
@@ -408,6 +446,8 @@ def process_chat_message(
                 history_str="None",  # No prior *explainer* history for this first turn
                 user_query_str=explainer_intro_query
             )
+            yield ("status", "TUTOR_INTRODUCTION")
+
             ai_reply_for_user = explainer_intro_response
             history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})
         else:
@@ -434,7 +474,11 @@ def process_chat_message(

     # --- Explanation Phase (STAGE_EXPLAINING) ---
     elif stage == STAGE_EXPLAINING:
+        yield ("status", "EXPLAINER_RESPONSE")
+
         logger.info(f"Orchestrator (DSPy): Stage={stage}. Calling ExplainerModule.")
+        # yield yield_feedback(new_state, "EXPLAINER_RESPONSE")
+
         explainer_sys_prompt = modified_explainer_prompt or new_state.get(STATE_EXPLAINER_PROMPT)
         expl_start_idx = new_state.get(STATE_EXPLANATION_START_INDEX, 0)

@@ -498,10 +542,15 @@ def process_chat_message(
         history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})

     # --- Final State Update & Return ---
-    new_state[STATE_HISTORY] = history
+    # new_state[STATE_HISTORY] = history
     logger.debug(f"Orchestrator (DSPy) returning: Stage='{new_state.get(STATE_STAGE)}', History Len={len(history)}, AI Reply starts: '{ai_reply_for_user[:50]}...'")
     logger.debug(f"Flags: DisplaySyllabus='{new_state.get(STATE_DISPLAY_SYLLABUS_FLAG) is not None}', TransitionExplainer='{new_state.get(STATE_TRANSITION_EXPLAINER_FLAG)}'")
+    new_state[STATE_HISTORY] = history
+    # new_state.pop(STATE_UI_FEEDBACK_MESSAGE, None)  # Clear feedback message for the final state

-
+    new_state[STATE_HISTORY] = history
+    yield ("final_result", new_state)
+
+


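For reference (not part of the commit): a sketch of how a caller, e.g. the app's UI layer, might consume the ("status", ...) and ("final_result", ...) events that process_chat_message now yields. The message strings and the run_turn helper are invented for illustration; only the status codes and the event shape come from this diff.

from typing import Any, Dict, Iterator, Tuple

# Illustrative UI text for the status codes seen in this diff; wording is invented here.
STATUS_MESSAGES = {
    "ANALYZING_RESOURCES_INITIAL": "Analyzing uploaded resources...",
    "PROCESSING_INPUT": "Processing your message...",
    "GENERATING_SYLLABUS": "Drafting the syllabus...",
    "FORMATTING_SYLLABUS": "Formatting the syllabus...",
    "GENERATING_PERSONA": "Setting up your tutor...",
    "TUTOR_INTRODUCTION": "Preparing the tutor's introduction...",
    "EXPLAINER_RESPONSE": "Writing the explanation...",
}

def run_turn(events: Iterator[Tuple[str, Any]]) -> Dict[str, Any]:
    """Consume orchestrator events; surface status updates, return the final state."""
    final_state: Dict[str, Any] = {}
    for kind, payload in events:
        if kind == "status":
            print(STATUS_MESSAGES.get(payload, "Working..."))  # replace with a real spinner/UI update
        elif kind == "final_result":
            final_state = payload
    return final_state

# Usage (argument names assumed from the function body shown above):
# new_state = run_turn(process_chat_message(user_message_text, current_session_state, uploaded_resource_data))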