import gradio as gr
import logging
import os
from typing import List, Dict, Any, Optional


from config import initialize_dspy
custom_lm = initialize_dspy()
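# NOTE: custom_lm is assumed to come back falsy (e.g. None) when the language
# model backend could not be configured; respond() checks this before answering.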

from config import (
    API_KEY, STATE_STAGE, STATE_HISTORY, STAGE_START, STAGE_EXPLAINING,
    STATE_EXPLAINER_PROMPT
)
from resource_processor import process_uploaded_files
from orchestrator import process_chat_message

logging.basicConfig(level=logging.INFO, format='{levelname} {asctime} [{name}]: {message}', style='{')
logger = logging.getLogger(__name__)
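

# respond() is a generator: every value it yields is a 6-tuple in the same order
# as `output_components` defined in the UI below:
# (chatbot, app_state, file_uploader, tutor_prompt_header, explainer_prompt_display, txt_input).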
def respond(
    user_message: str,
    chat_history_ui: List[Dict[str, str]],
    app_state: Dict[str, Any],
    uploaded_files: Optional[List[Any]],
    explainer_prompt_display: str
):
    """
    Core backend function. Receives UI state, calls the orchestrator, and yields updated UI state.
    This version also handles clearing the input textbox directly.
    """
    if not user_message.strip():
        yield chat_history_ui, app_state, gr.update(), gr.update(), gr.update(), user_message
        return
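
    # If the LM backend never initialised, surface a fatal error in the chat and stop.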
    if not custom_lm:
        error_msg = "FATAL ERROR: AI Backend is not configured. Please check your .env file for a valid GOOGLE_API_KEY."
        chat_history_ui.append({"role": "user", "content": user_message})
        chat_history_ui.append({"role": "assistant", "content": error_msg})
        yield chat_history_ui, app_state, gr.update(visible=True), gr.update(), gr.update(), ""
        return
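
    # Echo the user's message into both the UI history and the session history,
    # then stream an empty assistant bubble while the backend works.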
    chat_history_ui.append({"role": "user", "content": user_message})
    app_state[STATE_HISTORY].append({'role': 'user', 'parts': [{'text': user_message}]})
    chat_history_ui.append({"role": "assistant", "content": ""})

    yield chat_history_ui, app_state, gr.update(visible=False), gr.update(), gr.update(), user_message
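
    # Uploaded resources are only processed when a brand-new session (STAGE_START) has files attached.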
    processed_file_data = None
    if app_state.get(STATE_STAGE) == STAGE_START and uploaded_files:
        logger.info(f"Processing {len(uploaded_files)} files for new chat session.")
        processed_file_data = process_uploaded_files(uploaded_files)
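
    # The orchestrator is expected to return the user-facing reply plus the updated
    # session state; any exception is caught so the UI still gets a response.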
    try:
        final_user_facing_reply, new_state = process_chat_message(
            user_message_text=user_message,
            current_session_state=app_state,
            uploaded_resource_data=processed_file_data,
            modified_explainer_prompt=explainer_prompt_display
        )
        app_state = new_state
    except Exception as e:
        logger.error(f"Critical error in orchestrator call: {e}", exc_info=True)
        final_user_facing_reply = f"[SYSTEM ERROR: An exception occurred in the agent's logic. Please check the logs. Details: {e}]"
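
    # The orchestrator may leave a "display_syllabus_flag" entry in the state; when present,
    # the syllabus is shown as its own assistant message ahead of the final reply.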
    syllabus_flag_data = app_state.get("display_syllabus_flag")
    if syllabus_flag_data:
        syllabus_content = syllabus_flag_data.get("content", "Error displaying syllabus.")
        chat_history_ui[-1] = {"role": "assistant", "content": syllabus_content}
        chat_history_ui.append({"role": "assistant", "content": final_user_facing_reply})
        app_state.pop("display_syllabus_flag", None)
    else:
        chat_history_ui[-1]['content'] = final_user_facing_reply
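
    # Once the session reaches STAGE_EXPLAINING, reveal the editable tutor persona
    # prompt (pre-filled from the state if the textbox is still empty) and its header.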
    prompt_update = gr.update(visible=False)
    header_update = gr.update(visible=False)
    if app_state.get(STATE_STAGE) == STAGE_EXPLAINING:
        prompt_value = explainer_prompt_display if explainer_prompt_display else app_state.get(STATE_EXPLAINER_PROMPT)
        prompt_update = gr.update(value=prompt_value, visible=True)
        header_update = gr.update(visible=True)

    yield chat_history_ui, app_state, gr.update(visible=False), header_update, prompt_update, ""


def start_new_session():
    """ Resets the chat history, internal state, and all UI components for a new conversation. """
    logger.info("UI action: Starting new session.")
    initial_state = {
        STATE_STAGE: STAGE_START,
        STATE_HISTORY: [{'role': 'model', 'parts': [{'text': 'Hello! What would you like to learn about today?'}]}]
    }
    initial_chat_history_ui = [{"role": "assistant", "content": "Hello! What would you like to learn about today?"}]
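
    # The tuple below must follow the order of `output_components`:
    # (chatbot, app_state, file_uploader, tutor_prompt_header, explainer_prompt_display, txt_input).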
    return (
        initial_chat_history_ui,
        initial_state,
        gr.update(value=[], visible=True),
        gr.update(visible=False),
        gr.update(value="", visible=False),
        ""
    )


with gr.Blocks(theme=gr.themes.Soft(), title="Forge Guide AI Tutor") as demo:
    gr.Markdown("# Forge Guide: AI Syllabus Architect")
    gr.Markdown("Start a new conversation by describing what you want to learn. For new chats, you can also upload resources like PDFs or text files.")
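
    # Per-session state tracked by Gradio; mirrors the initial_state built in start_new_session().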
    app_state = gr.State({
        STATE_STAGE: STAGE_START,
        STATE_HISTORY: [{'role': 'model', 'parts': [{'text': 'Hello! What would you like to learn about today?'}]}]
    })

    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                [{"role": "assistant", "content": "Hello! What would you like to learn about today?"}],
                elem_id="chatbot",
                height=650,
                render_markdown=True,
                avatar_images=(None, "https://i.imgur.com/3pyR0Vf.png"),
                type='messages',
                latex_delimiters=[{"left": "$$", "right": "$$", "display": True}, {"left": "$", "right": "$", "display": False}]
            )

            with gr.Row():
                txt_input = gr.Textbox(
                    scale=4,
                    show_label=False,
                    placeholder="e.g., 'I want to build a RAG pipeline from scratch'",
                    container=False,
                )
                submit_btn = gr.Button("Send", variant="primary", scale=1, min_width=100)

        with gr.Column(scale=1):
            gr.Markdown("### Resources")
            file_uploader = gr.File(
                file_count="multiple",
                label="Upload for New Chat (Optional)",
                file_types=[".pdf", ".txt", ".docx"],
                visible=True,
                interactive=True,
            )
            new_session_btn = gr.Button("Start New Session", variant="secondary")
            tutor_prompt_header = gr.Markdown("### Tutor Persona Prompt", visible=False)
            explainer_prompt_display = gr.Textbox(
                label="You can modify the tutor's persona and instructions here:",
                lines=15,
                interactive=True,
                visible=False,
            )
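
    # Wiring: `submit_actions` must match the parameter order of respond();
    # `output_components` must match the order of its yield tuples and of start_new_session()'s return tuple.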
    submit_actions = [txt_input, chatbot, app_state, file_uploader, explainer_prompt_display]
    output_components = [chatbot, app_state, file_uploader, tutor_prompt_header, explainer_prompt_display, txt_input]

    submit_btn.click(
        fn=respond,
        inputs=submit_actions,
        outputs=output_components,
    )

    txt_input.submit(
        fn=respond,
        inputs=submit_actions,
        outputs=output_components,
    )

    new_session_btn.click(
        fn=start_new_session,
        inputs=[],
        outputs=output_components
    )


if __name__ == "__main__":
    if not API_KEY:
        print("\n" + "="*60)
        print("CRITICAL ERROR: Cannot launch Gradio app.")
        print("Your GOOGLE_API_KEY is not set in the .env file.")
        print("="*60 + "\n")
    else:
        print("Launching Gradio app...")
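        # queue() enables Gradio's event queue so the generator-based respond() can stream
        # its intermediate yield to the UI before the final answer arrives.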
        demo.queue().launch(debug=True)