File size: 31,536 Bytes
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
 
 
 
17dace3
 
 
 
 
 
 
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
17dace3
e0d1ba6
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
17dace3
 
 
 
 
 
 
 
 
 
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
17dace3
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
17dace3
 
 
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
 
17dace3
e0d1ba6
 
17dace3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0d1ba6
17dace3
 
e0d1ba6
 
17dace3
e0d1ba6
 
 
 
17dace3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
# FILE: orchestrator.py
# (Corrected Imports and Module Instantiation)

import logging
import json
from typing import List, Dict, Any, Tuple, Optional
import dspy

# --- 1. Corrected Imports from Project Modules ---

# Import the constants defined in config.py using a correct relative import.
# We no longer import the initialize_dspy function here.
from config import (
    STATE_STAGE, STATE_HISTORY, STATE_FINAL_SYLLABUS, STATE_EXPLAINER_PROMPT,
    STATE_EXPLANATION_START_INDEX, STATE_CURRENT_TITLE, STATE_GENERATED_TITLE,
    STATE_RESOURCE_SUMMARY_OVERVIEW, STATE_RESOURCE_TYPE_FOR_SYLLABUS,
    STATE_RESOURCE_CONTENT_JSON_FOR_SYLLABUS, STATE_DISPLAY_SYLLABUS_FLAG,
    STATE_TRANSITION_EXPLAINER_FLAG, STAGE_START, STAGE_NEGOTIATING,
    STAGE_EXPLAINING, STAGE_ERROR, DEFAULT_CHAT_TITLE,
    TITLE_GENERATION_THRESHOLD, TITLE_MAX_HISTORY_SNIPPET_FOR_TITLE
)

# Import the synchronous DSPy modules and signatures.
from dspy_modules import (
    ConversationManager,
    SyllabusGeneratorRouter,
    InitialResourceSummarizer,
    DynamicResourceSummarizerModule,
    LearningStyleQuestioner,
    PersonaPromptGenerator,
    ExplainerModule
)
from dspy_signatures import SyllabusFeedbackRequestSignature, FormatSyllabusXMLToMarkdown, TitleGenerationSignature

logger = logging.getLogger(__name__)

# Preamble prepended to the agent-only resource overview that is stored in the
# chat history (message_type 'internal_resource_summary'); it instructs the
# model that this summary is not user-visible. Never shown to the end-user.
initial_summary_info = """
This Resource Summary is visible only to You (the agent/system) and not to the end-user.
It is provided for Your reference after the user has uploaded a resource.
This information is primarily for understanding the context of the user's resource.
For the syllabus, you should provide either the raw data or a dynamic summary.\n"""

def initialize_orchestrator_modules():
    """
    Instantiate all DSPy modules AFTER the LM has been configured.

    Must be called from the main app script once ``dspy.settings.lm`` is set.
    On any failure every module-level handle is explicitly bound to ``None`` so
    that later guards such as ``if not CONVO_MANAGER`` in
    ``process_chat_message`` do not raise ``NameError``.

    Returns:
        bool: True when every module was built successfully, False otherwise.
    """
    global CONVO_MANAGER, SYLLABUS_ROUTER, INITIAL_RESOURCE_SUMMARIZER, DYNAMIC_SUMMARIZER_MODULE, \
           LEARNING_STYLE_QUESTIONER, PERSONA_PROMPT_GENERATOR, EXPLAINER_MODULE, SYLLABUS_FEEDBACK_REQUESTER, \
           SYLLABUS_XML_TO_MARKDOWN_FORMATTER, TITLE_GENERATOR_PREDICTOR

    if not dspy.settings.lm:
        logger.error("Cannot initialize orchestrator modules: DSPy LM is not configured.")
        # Bind the globals so downstream truthiness checks see None, not NameError.
        CONVO_MANAGER = SYLLABUS_ROUTER = INITIAL_RESOURCE_SUMMARIZER = \
            DYNAMIC_SUMMARIZER_MODULE = LEARNING_STYLE_QUESTIONER = \
            PERSONA_PROMPT_GENERATOR = EXPLAINER_MODULE = SYLLABUS_FEEDBACK_REQUESTER = \
            SYLLABUS_XML_TO_MARKDOWN_FORMATTER = TITLE_GENERATOR_PREDICTOR = None
        return False

    try:
        CONVO_MANAGER = ConversationManager()
        SYLLABUS_ROUTER = SyllabusGeneratorRouter()
        INITIAL_RESOURCE_SUMMARIZER = InitialResourceSummarizer()
        DYNAMIC_SUMMARIZER_MODULE = DynamicResourceSummarizerModule()
        LEARNING_STYLE_QUESTIONER = LearningStyleQuestioner()
        PERSONA_PROMPT_GENERATOR = PersonaPromptGenerator()
        EXPLAINER_MODULE = ExplainerModule()
        SYLLABUS_FEEDBACK_REQUESTER = dspy.Predict(SyllabusFeedbackRequestSignature, temperature=0.7)
        SYLLABUS_XML_TO_MARKDOWN_FORMATTER = dspy.Predict(FormatSyllabusXMLToMarkdown, temperature=0.3)
        TITLE_GENERATOR_PREDICTOR = dspy.Predict(TitleGenerationSignature, temperature=0.4)
        logger.info("Orchestrator's DSPy modules initialized successfully.")
        return True
    except Exception as e:
        logger.critical(f"A critical error occurred during orchestrator module initialization: {e}", exc_info=True)
        # Leave the system in a known-bad-but-defined state.
        CONVO_MANAGER = SYLLABUS_ROUTER = INITIAL_RESOURCE_SUMMARIZER = \
            DYNAMIC_SUMMARIZER_MODULE = LEARNING_STYLE_QUESTIONER = \
            PERSONA_PROMPT_GENERATOR = EXPLAINER_MODULE = SYLLABUS_FEEDBACK_REQUESTER = \
            SYLLABUS_XML_TO_MARKDOWN_FORMATTER = TITLE_GENERATOR_PREDICTOR = None
        return False


# --- Helper functions and the main process_chat_message function follow below ---
# (The rest of your file remains the same)

def format_history_for_dspy(history_list: List[Dict[str, Any]]) -> str:
    """
    Flatten chat history into "role: content" lines joined by "\\n---\\n".

    The 'model' role is renamed 'assistant' for compatibility with litellm.
    Accepts the same message shapes handled elsewhere in this module:
    ``parts`` as a list of ``{'text': ...}`` dicts, a list of plain strings,
    or a single string. Missing or unexpected shapes degrade to "" instead
    of raising (the original crashed on a list-of-str ``parts``).

    Args:
        history_list: Sequence of message dicts with 'role' and 'parts' keys.

    Returns:
        A single string, one "role: content" entry per turn.
    """
    formatted_history = []
    for turn in history_list:
        parts = turn.get('parts')
        content = ""
        if isinstance(parts, list) and parts:
            first_part = parts[0]
            if isinstance(first_part, dict):
                content = first_part.get('text', '')
            elif isinstance(first_part, str):
                content = first_part
        elif isinstance(parts, str):
            content = parts

        role = turn.get('role', 'unknown')
        if role == 'model':
            role = 'assistant'  # Replace 'model' with 'assistant'

        formatted_history.append(f"{role}: {content}")
    return "\n---\n".join(formatted_history)

# The role part for model has been replaced with assistant for compatibility with litellm.

def get_last_syllabus_content_from_history(history: List[Dict[str, Any]]) -> Optional[str]:
    """
    Walk the history backwards and return the newest syllabus message body.

    Only messages from 'model' or 'system' whose message_type is 'syllabus'
    (legacy XML) or 'syllabus_markdown' (current Markdown) qualify. Returns
    the stripped content of the most recent match, or None when no non-empty
    syllabus message exists.
    """
    logger.debug("Helper: Searching history backwards for last syllabus-typed message...")
    if not history:
        logger.warning("Helper: History is empty, cannot find syllabus.")
        return None

    for idx in reversed(range(len(history))):
        msg = history[idx]
        role = msg.get('role')
        mtype = msg.get('message_type')

        logger.debug(f"Helper: Checking history index {idx}, Role: '{role}', Type: '{mtype}'")

        # Skip anything that is not an explicitly syllabus-typed model/system message.
        if role not in ['model', 'system'] or mtype not in ['syllabus', 'syllabus_markdown']:
            continue

        text = ""
        parts = msg.get('parts', [])

        if isinstance(parts, list) and len(parts) > 0:
            head = parts[0]
            if isinstance(head, dict):
                text = head.get('text', '')
            elif isinstance(head, str):
                text = head
        elif isinstance(parts, str):  # 'parts' itself may have been saved as a string
            text = parts
        elif 'content' in msg:  # Simpler {'role': ..., 'content': ...} fallback shape
            logger.debug("Helper: 'parts' key not found or empty, trying 'content' key directly.")
            if isinstance(msg.get('content'), str):
                text = msg.get('content', '')

        if text:
            logger.info(f"Helper: FOUND syllabus content via message_type '{mtype}' at index {idx}. Content starts: '{text[:70]}...'")
            return text.strip()  # Full content of this message

        # Empty body: keep scanning older messages.
        logger.warning(f"Helper: Found syllabus-typed message at index {idx} but content was empty.")

    logger.warning("Helper: Finished searching history, did not find a valid syllabus-typed message with content.")
    return None




# --- Main Orchestration Logic ---
def process_chat_message(
    user_message_text: str,
    current_session_state: Dict[str, Any],
    modified_explainer_prompt: Optional[str] = None,
    uploaded_resource_data: Optional[Dict[str, str]] = None  # Filename -> text content
) -> Tuple[str, Dict[str, Any]]:
    """
    Process one user message through the staged DSPy pipeline.

    This is a generator: it yields ("status", <code>) tuples so the caller can
    surface progress feedback, and finishes with exactly one
    ("final_result", <updated_session_state>) tuple.

    Args:
        user_message_text: Latest user message (the view has already appended
            it to the history held inside ``current_session_state``).
        current_session_state: Session dict keyed by the STATE_* constants.
        modified_explainer_prompt: Optional override for the stored explainer
            system prompt, used only during STAGE_EXPLAINING.
        uploaded_resource_data: Optional {filename: extracted text}; resources
            are only honoured on the very first turn (STAGE_START).
    """
    # globals().get avoids a NameError when initialize_orchestrator_modules()
    # was never called (or failed), so we hit the graceful error path instead.
    if not globals().get('CONVO_MANAGER'):
        logger.error("Orchestrator's DSPy modules are not initialized. Cannot process message.")
        # Return an error state immediately
        error_state = current_session_state.copy()
        error_state[STATE_STAGE] = STAGE_ERROR
        error_state[STATE_HISTORY].append({'role': 'user', 'parts': [{'text': user_message_text}]})
        error_state[STATE_HISTORY].append({'role': 'model', 'parts': [{'text': "[FATAL ERROR: AI modules not initialized. Please contact support.]"}]})
        yield ("final_result", error_state)
        return

    new_state = current_session_state.copy()
    # Clear the per-turn UI flags left over from the previous turn.
    new_state.pop(STATE_DISPLAY_SYLLABUS_FLAG, None)
    new_state.pop(STATE_TRANSITION_EXPLAINER_FLAG, None)
    new_state.pop(STATE_GENERATED_TITLE, None)

    stage = new_state.get(STATE_STAGE, STAGE_START)
    history: List[Dict[str, Any]] = new_state.get(STATE_HISTORY, [])  # Already includes the latest user msg
    current_title = new_state.get(STATE_CURRENT_TITLE, DEFAULT_CHAT_TITLE)

    ai_reply_for_user = ""

    logger.debug(f"Orchestrator (DSPy) received: Stage='{stage}', Title='{current_title}', History len={len(history)}")
    if uploaded_resource_data:
        logger.info(f"Processing {len(uploaded_resource_data)} uploaded resources.")

    try:
        # --- Initial Resource Processing ---
        # Resources can only be uploaded at the start of a conversation.
        if stage == STAGE_START and uploaded_resource_data:
            yield ("status", "ANALYZING_RESOURCES_INITIAL")

            logger.info("First turn with resources. Processing them now...")

            total_chars = sum(len(text) for text in uploaded_resource_data.values())

            resource_summary_for_manager = "Resources were provided by the user."  # Default
            resource_type_for_syllabus = "NONE"
            resource_content_json = "{}"

            # Heuristic split: "heavy" uploads (> 70k chars) get dynamic
            # summaries at syllabus-generation time; lighter ones are passed
            # to the syllabus router as raw JSON text.
            if total_chars > 70000:
                logger.info(f"Total resource chars ({total_chars}) > 70k. Using DYNAMIC SUMMARIES for syllabus gen.")
                resource_type_for_syllabus = "SUMMARIES"
                # Overview for the manager via InitialResourceSummarizer;
                # truncate each file so the overview prompt stays bounded.
                initial_summary_input_dict = {
                    fname: content[:40000] for fname, content in uploaded_resource_data.items()
                }
                resource_summary_for_manager = INITIAL_RESOURCE_SUMMARIZER.forward(initial_summary_input_dict)
            else:
                logger.info(f"Total resource chars ({total_chars}) <= 70k. Using RAW TEXT for syllabus gen.")
                resource_type_for_syllabus = "RAW_TEXT"
                initial_summary_input_dict = {
                    fname: content[:40000] for fname, content in uploaded_resource_data.items()
                }
                resource_summary_for_manager = INITIAL_RESOURCE_SUMMARIZER.forward(initial_summary_input_dict)
                resource_content_json = json.dumps(uploaded_resource_data, indent=2)

            new_state[STATE_RESOURCE_SUMMARY_OVERVIEW] = resource_summary_for_manager
            new_state[STATE_RESOURCE_TYPE_FOR_SYLLABUS] = resource_type_for_syllabus
            new_state[STATE_RESOURCE_CONTENT_JSON_FOR_SYLLABUS] = resource_content_json
            # Full (untruncated) data, kept for later dynamic summarization.
            new_state['raw_resource_data_for_dynamic_summary'] = uploaded_resource_data

            # Record the agent-only summary once, only at the very start of the chat.
            if resource_summary_for_manager and resource_summary_for_manager != "No resources were processed or user did not provide any." and len(history) <= 2:
                history.append({'role': 'model', 'parts': [{"text": str(initial_summary_info) + str(resource_summary_for_manager)}], 'message_type': 'internal_resource_summary'})

        # --- Negotiation Phase (STAGE_START, STAGE_NEGOTIATING) ---
        if stage in [STAGE_START, STAGE_NEGOTIATING]:
            if stage == STAGE_START:
                new_state[STATE_STAGE] = STAGE_NEGOTIATING
                stage = STAGE_NEGOTIATING  # Update local stage variable

            logger.info(f"Orchestrator (DSPy): Stage={stage}. Calling ConversationManager.")

            history_str = format_history_for_dspy(history)
            # Give the manager the most recent syllabus, if any exists.
            current_syllabus_xml_str = new_state.get(STATE_FINAL_SYLLABUS) or \
                                       get_last_syllabus_content_from_history(history) or \
                                       "None"

            yield ("status", "PROCESSING_INPUT")

            action_code, display_text = CONVO_MANAGER.forward(
                conversation_history_str=history_str,
                current_syllabus_xml=current_syllabus_xml_str,
                user_input=user_message_text,  # Manager needs the latest user message explicitly
            )
            logger.info(f"ConversationManager action: '{action_code}', display_text: '{display_text[:100]}...'")

            ai_reply_for_user = display_text  # Empty unless the action is CONVERSE
            if display_text:
                history.append({'role': 'model', 'parts': [{'text': display_text}]})

            # --- Handle Actions from ConversationManager ---
            if action_code in ["GENERATE", "MODIFY"]:
                yield ("status", "GENERATING_SYLLABUS")

                task_type_str = "generation" if action_code == "GENERATE" else "modification"
                retrieved_resource_content_json = new_state.get(STATE_RESOURCE_CONTENT_JSON_FOR_SYLLABUS, "{}")
                _temp_resource_type = new_state.get(STATE_RESOURCE_TYPE_FOR_SYLLABUS)  # Could be Python None
                retrieved_resource_type = "NONE" if _temp_resource_type is None else _temp_resource_type
                logger.info(f"Syllabus {task_type_str} requested. Resource type from state: {retrieved_resource_type}")

                current_resource_content_json = "{}"  # Defensive default
                # If type is SUMMARIES, generate them now using DynamicSummarizer.
                if retrieved_resource_type == "SUMMARIES":
                    raw_data_for_dynamic_summary = new_state.get('raw_resource_data_for_dynamic_summary')
                    if raw_data_for_dynamic_summary and isinstance(raw_data_for_dynamic_summary, dict):
                        logger.info("Generating dynamic summaries for syllabus router...")
                        summaries_for_syllabus = {}
                        history_str_for_summarizer = format_history_for_dspy(history)  # Fresh history string
                        for res_id, res_content in raw_data_for_dynamic_summary.items():
                            summary_dict = DYNAMIC_SUMMARIZER_MODULE.forward(
                                resource_content=res_content,
                                resource_identifier=res_id,
                                conversation_history_str=history_str_for_summarizer
                            )
                            if summary_dict:
                                summaries_for_syllabus[res_id] = summary_dict
                        current_resource_content_json = json.dumps(summaries_for_syllabus, indent=2)
                        logger.info(f"Dynamic summaries generated. JSON length: {len(current_resource_content_json)}")
                    else:
                        # BUGFIX: this previously assigned to `current_resource_type`,
                        # so the fallback never took effect and the router was told
                        # SUMMARIES with empty content.
                        logger.warning("SUMMARIES type selected but no 'raw_resource_data_for_dynamic_summary' found. Falling back to NONE.")
                        retrieved_resource_type = "NONE"
                        current_resource_content_json = "{}"
                if retrieved_resource_type == "RAW_TEXT":
                    current_resource_content_json = retrieved_resource_content_json

                generated_xml = SYLLABUS_ROUTER.forward(
                    conversation_history_str=format_history_for_dspy(history),
                    resource_type=retrieved_resource_type,
                    resource_content=current_resource_content_json if retrieved_resource_type != "NONE" else None
                )
                logger.debug(f"Syllabus router invoked with resource type: {retrieved_resource_type}")

                final_syllabus_content_for_frontend = generated_xml
                message_content_type_for_syllabus_display = 'syllabus_markdown'
                syllabus_generation_was_successful = False  # Initialize flag

                # --- BLOCK 1: XML to Markdown Formatting (and set success flag) ---
                if generated_xml and not generated_xml.strip().upper().startswith(("<SYLLABUS>\n[ERROR", "<SYLLABUS>[ERROR")):
                    syllabus_generation_was_successful = True  # Mark initial generation as successful
                    yield ("status", "FORMATTING_SYLLABUS")

                    logger.info(f"Syllabus XML generated. Length: {len(generated_xml)}. Attempting Markdown formatting.")

                    if SYLLABUS_XML_TO_MARKDOWN_FORMATTER:
                        try:
                            format_prediction = SYLLABUS_XML_TO_MARKDOWN_FORMATTER(
                                syllabus_xml_input=generated_xml
                            )
                            formatted_markdown = format_prediction.cleaned_syllabus_markdown.strip()

                            if formatted_markdown and not formatted_markdown.lower().startswith(("[error", "[warn")):
                                final_syllabus_content_for_frontend = formatted_markdown
                                logger.info("Syllabus successfully formatted to Markdown.")
                            else:
                                # Formatter produced nothing usable; fall back to the router's XML.
                                logger.warning(f"Syllabus Markdown formatting returned empty/error: {formatted_markdown[:100]}. Using raw XML (from router).")
                        except Exception as fmt_e:
                            logger.error(f"Error during syllabus XML to Markdown formatting: {fmt_e}", exc_info=True)
                    else:
                        logger.warning("SYLLABUS_XML_TO_MARKDOWN_FORMATTER not available. Using raw XML (from router).")
                else:
                    syllabus_generation_was_successful = False  # Explicitly false
                    logger.error(f"Syllabus XML generation by SYLLABUS_ROUTER failed or returned error: {generated_xml[:200]}")

                # --- BLOCK 2: Add syllabus to history and state ---
                # This message is the syllabus display itself (or the router's error).
                history.append({
                    'role': 'model',
                    'parts': [{'text': final_syllabus_content_for_frontend}],
                    'message_type': message_content_type_for_syllabus_display
                })
                logger.debug(f"Appended syllabus message to history: {history[-1]}")
                new_state[STATE_DISPLAY_SYLLABUS_FLAG] = {
                    "content": final_syllabus_content_for_frontend,
                    "type": message_content_type_for_syllabus_display
                }

                # --- BLOCK 3: Generate Conversational Reply (Feedback or Error) ---
                if syllabus_generation_was_successful:
                    # The syllabus (Markdown or XML) is already in history. Now add the feedback prompt.
                    logger.info(f"Syllabus processed for display (type: {message_content_type_for_syllabus_display}). Requesting user feedback.")
                    if SYLLABUS_FEEDBACK_REQUESTER:
                        try:
                            history_for_feedback_str = format_history_for_dspy(history)
                            feedback_prediction = SYLLABUS_FEEDBACK_REQUESTER(
                                conversation_history_with_syllabus=history_for_feedback_str
                            )
                            ai_reply_for_user = feedback_prediction.feedback_query_to_user.strip()
                            if not ai_reply_for_user:
                                logger.warning("SYLLABUS_FEEDBACK_REQUESTER returned empty, using fallback.")
                                ai_reply_for_user = "I've drafted the syllabus. What are your thoughts?"
                        except Exception as fb_err:
                            logger.error(f"Error calling SYLLABUS_FEEDBACK_REQUESTER: {fb_err}", exc_info=True)
                            ai_reply_for_user = "Here is the syllabus draft. How does it look?"
                    else:
                        logger.error("SYLLABUS_FEEDBACK_REQUESTER not initialized. Using hardcoded feedback prompt.")
                        ai_reply_for_user = "I've prepared the syllabus. Please review it."

                    # Add the feedback prompt as the next message in history
                    history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})
                else:
                    # Surface the router's error text directly to the user.
                    ai_reply_for_user = final_syllabus_content_for_frontend
                    logger.info(f"Syllabus generation failed. AI reply set to the error from router: {ai_reply_for_user[:100]}")

            elif action_code == "FINALIZE":
                logger.info("Finalization requested by manager.")
                last_syllabus_in_history = get_last_syllabus_content_from_history(history)
                if last_syllabus_in_history:
                    new_state[STATE_FINAL_SYLLABUS] = f"<syllabus>\n{last_syllabus_in_history}\n</syllabus>"  # Store it

                    # Ask for learning style
                    style_question = LEARNING_STYLE_QUESTIONER.forward(
                        conversation_history_str=format_history_for_dspy(history)
                    )
                    ai_reply_for_user = style_question
                    history.append({'role': 'model', 'parts': [{'text': style_question}]})
                else:
                    logger.warning("FINALIZE action but no syllabus found in history.")
                    ai_reply_for_user = "It seems we don't have a syllabus to finalize yet. Could we create one first?"
                    history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})

            elif action_code == "PERSONA":
                yield ("status", "GENERATING_PERSONA")
                logger.info("Persona generation triggered by manager.")
                final_syllabus_xml_str = new_state.get(STATE_FINAL_SYLLABUS)
                if final_syllabus_xml_str:
                    logger.info("Generating explainer prompt body...")
                    explainer_prompt_body = PERSONA_PROMPT_GENERATOR.forward(
                        conversation_history_str=format_history_for_dspy(history)
                    )
                    if explainer_prompt_body:
                        full_explainer_prompt = f"{explainer_prompt_body}\n\nHere is the syllabus we will follow:\n{final_syllabus_xml_str}"
                        logger.debug(f"Full explainer prompt:\n{full_explainer_prompt}")
                        new_state[STATE_EXPLAINER_PROMPT] = full_explainer_prompt
                        new_state[STATE_STAGE] = STAGE_EXPLAINING  # << TRANSITION STAGE
                        new_state[STATE_TRANSITION_EXPLAINER_FLAG] = True
                        new_state[STATE_EXPLANATION_START_INDEX] = len(history)  # Record index before explainer intro

                        logger.info("Explainer prompt generated. Moving to EXPLAINING stage.")

                        explainer_intro_query = "Based on your persona (defined in system_instructions) and the syllabus provided, please introduce yourself to the user. Briefly state what you'll be helping them with and adopt a welcoming tone consistent with your persona."
                        explainer_intro_response = EXPLAINER_MODULE.forward(
                            system_instructions_str=full_explainer_prompt,
                            history_str="None",  # No prior *explainer* history for this first turn
                            user_query_str=explainer_intro_query
                        )
                        yield ("status", "TUTOR_INTRODUCTION")

                        ai_reply_for_user = explainer_intro_response
                        history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})
                    else:
                        logger.error("Failed to generate explainer prompt body.")
                        ai_reply_for_user = "Sorry, I had trouble setting up the learning session. Please try again."
                        history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})
                        new_state[STATE_STAGE] = STAGE_ERROR
                else:
                    logger.warning("PERSONA action but no finalized syllabus in state.")
                    ai_reply_for_user = "We need to finalize a syllabus before we can tailor the tutor. Shall we continue with that?"
                    history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})

            elif action_code == "CONVERSE":
                # ai_reply_for_user is already set from manager's display_text
                if not ai_reply_for_user:  # Should not happen if manager follows rules
                    logger.warning("CONVERSE action but manager provided no display_text. Using fallback.")
                    ai_reply_for_user = "Okay, how would you like to proceed?"
                    history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})

            else:
                logger.error(f"Unknown action_code '{action_code}' from ConversationManager.")
                ai_reply_for_user = "I'm not sure how to proceed with that. Could you clarify?"
                history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})

        # --- Explanation Phase (STAGE_EXPLAINING) ---
        elif stage == STAGE_EXPLAINING:
            yield ("status", "EXPLAINER_RESPONSE")

            logger.info(f"Orchestrator (DSPy): Stage={stage}. Calling ExplainerModule.")

            explainer_sys_prompt = modified_explainer_prompt or new_state.get(STATE_EXPLAINER_PROMPT)
            expl_start_idx = new_state.get(STATE_EXPLANATION_START_INDEX, 0)

            if not explainer_sys_prompt:
                logger.error("Explainer stage but no explainer_system_prompt in state.")
                ai_reply_for_user = "[SYSTEM ERROR: Explainer setup incomplete. Cannot proceed.]"
                history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})
                new_state[STATE_STAGE] = STAGE_ERROR
            else:
                # For explainer, only pass relevant part of history (after persona setup)
                explainer_relevant_history_str = format_history_for_dspy(history[expl_start_idx:])

                explainer_response = EXPLAINER_MODULE.forward(
                    system_instructions_str=explainer_sys_prompt,
                    history_str=explainer_relevant_history_str,
                    user_query_str=user_message_text
                )
                ai_reply_for_user = explainer_response
                history.append({'role': 'model', 'parts': [{'text': explainer_response}]})

        # --- Error Stage ---
        elif stage == STAGE_ERROR:
            logger.warning("Orchestrator is in ERROR stage.")
            ai_reply_for_user = "I'm sorry, an internal error occurred. Please try starting a new conversation or contact support."
            # To prevent loops, don't add this generic error to history if user just messaged. Let user try again.

        # --- Unknown Stage ---
        else:
            logger.error(f"Orchestrator encountered an unknown stage: {stage}")
            ai_reply_for_user = "[SYSTEM ERROR: Invalid application state. Please start over.]"
            history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})
            new_state[STATE_STAGE] = STAGE_ERROR

        # --- Title Generation Logic (Simplified to use DSPy Predict) ---
        final_message_count = len(history)
        if current_title == DEFAULT_CHAT_TITLE and final_message_count >= TITLE_GENERATION_THRESHOLD:
            logger.info("Conditions met for title generation.")
            # Prepare a snippet of history for the title generator
            history_for_title_str = format_history_for_dspy(history[:TITLE_MAX_HISTORY_SNIPPET_FOR_TITLE])
            if TITLE_GENERATOR_PREDICTOR:
                try:
                    title_prediction = TITLE_GENERATOR_PREDICTOR(chat_history_summary=history_for_title_str)
                    generated_title_text = title_prediction.chat_title.strip().strip('"\'')
                    if generated_title_text and not generated_title_text.lower().startswith(("[error", "[warn", "[empty")):
                        new_state[STATE_GENERATED_TITLE] = generated_title_text[:150]  # Max length
                        logger.info(f"Generated title: '{new_state[STATE_GENERATED_TITLE]}'")
                    else:
                        logger.warning(f"Title generator returned empty or error-like: {generated_title_text}")
                except Exception as title_e:
                    logger.error(f"Error during title generation predictor call: {title_e}", exc_info=True)
            else:
                logger.error("TITLE_GENERATOR_PREDICTOR not initialized.")

    except Exception as e:
        logger.error(f"Orchestrator (DSPy): Unhandled exception: {e}", exc_info=True)
        ai_reply_for_user = "[SYSTEM ERROR: An unexpected issue occurred. Please try again.]"
        new_state[STATE_STAGE] = STAGE_ERROR
        # Ensure error is logged to history if not already the last message
        if not history or not (history[-1]['role'] == 'model' and history[-1]['parts'][0]['text'] == ai_reply_for_user):
            history.append({'role': 'model', 'parts': [{'text': ai_reply_for_user}]})

    # --- Final State Update & Return ---
    logger.debug(f"Orchestrator (DSPy) returning: Stage='{new_state.get(STATE_STAGE)}', History Len={len(history)}, AI Reply starts: '{ai_reply_for_user[:50]}...'")
    logger.debug(f"Flags: DisplaySyllabus='{new_state.get(STATE_DISPLAY_SYLLABUS_FLAG) is not None}', TransitionExplainer='{new_state.get(STATE_TRANSITION_EXPLAINER_FLAG)}'")
    new_state[STATE_HISTORY] = history
    yield ("final_result", new_state)