import io import os import requests from PIL import Image from typing import Optional, Tuple, List, Dict import gradio as gr from dotenv import load_dotenv import google.generativeai as genai import pdfplumber import tempfile from fpdf import FPDF import re # Import regex for parsing quiz # ───────────────────────────────────────────── # Load environment variables & Constants # ───────────────────────────────────────────── load_dotenv() OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY") HF_TOKEN = os.getenv("HF_TOKEN") GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") MISTRAL_MODEL = "mistralai/mistral-7b-instruct" FLUX_MODEL_API = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell" OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions" GEMINI_MODEL_NAME = "gemini-1.5-flash" PDF_TEXT_LIMIT = 8000 if not all([OPENROUTER_API_KEY, HF_TOKEN, GEMINI_API_KEY]): raise ValueError("❌ Missing one or more required environment variables.") # ───────────────────────────────────────────── # Configure Gemini # ───────────────────────────────────────────── genai.configure(api_key=GEMINI_API_KEY) gemini_model = genai.GenerativeModel(GEMINI_MODEL_NAME) # ───────────────────────────────────────────── # Prompt Templates # ───────────────────────────────────────────── LEVEL_PROMPTS = { "Kid": "Explain like I'm 7 years old: ", "Beginner": "Explain in simple terms: ", "Advanced": "Explain in technical detail: " } # ───────────────────────────────────────────── # UI HTML & CSS Styling # ───────────────────────────────────────────── LOTTIE_ANIMATION_HTML = """
""" # Combined CSS for dark mode, hover effect, and accordion styling APP_CSS = """ body.dark { --body-background-fill: #121212; --background-fill-primary: #1E1E1E; --background-fill-secondary: #2C2C2C; --text-color-primary: #FFFFFF; --text-color-secondary: #E0E0E0; --border-color-primary: #333333; --input-background-fill: #2C2C2C; --button-secondary-background-fill: #333333; --button-secondary-text-color: #FFFFFF; --button-primary-background-fill: #6d28d9; /* Purple */ --button-primary-border-color: #6d28d9; /* Purple */ --button-primary-text-color: #FFFFFF; } .dark .gradio-container { background: var(--body-background-fill); } .dark .gradio-tabs-nav { background: var(--background-fill-secondary); } /* Button Hover Effect */ button:hover { transition: all 0.3s ease; transform: scale(1.01); box-shadow: 0px 0px 8px rgba(255, 255, 255, 0.1); } /* Style for Accordions */ /* Adjusting background and text color for accordions in both modes */ .gradio-accordion.secondary > .label { background-color: var(--background-fill-primary); /* Use primary background for label */ color: var(--text-color-primary); border-color: var(--border-color-primary); } .gradio-accordion.secondary > .label:hover { background-color: var(--background-fill-secondary); /* Slightly change on hover */ } .gradio-accordion.secondary > .label > .icon { color: var(--text-color-primary); } .gradio-accordion.secondary > .panel { background-color: var(--background-fill-secondary); /* Secondary background for panel content */ border-color: var(--border-color-primary); } /* Style for Dropdowns inside Accordions */ .gradio-accordion .gr-form { /* Target forms/inputs inside accordion panels */ background-color: var(--background-fill-secondary); /* Ensure consistent background */ } .gradio-accordion .gr-dropdown-value { color: var(--text-color-primary); /* Set text color for dropdown */ } .gradio-accordion .gr-dropdown-options { background-color: var(--background-fill-secondary); /* Options background */ border-color: var(--border-color-primary); } .gradio-accordion .gr-dropdown-options .gr-dropdown-option { color: var(--text-color-primary); /* Options text color */ } .gradio-accordion .gr-dropdown-options .gr-dropdown-option:hover { background-color: var(--background-fill-primary); /* Option hover background */ } /* Style for feedback text */ #quiz_feedback_box { margin-top: 15px; /* Add space above feedback */ padding: 10px; border: 1px solid var(--border-color-primary); border-radius: 5px; } #quiz_feedback_box.correct { border-color: green; color: green; } #quiz_feedback_box.incorrect { border-color: red; color: red; } """ # ───────────────────────────────────────────── # API Calls # ───────────────────────────────────────────── def generate_mistral_response(prompt: str, system_message: str) -> str: headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"} payload = { "model": MISTRAL_MODEL, "messages": [{"role": "system", "content": system_message}, {"role": "user", "content": prompt}], "temperature": 0.7 } try: response = requests.post(OPENROUTER_API_URL, headers=headers, json=payload, timeout=45) response.raise_for_status() return response.json()["choices"][0]["message"]["content"].strip() except requests.exceptions.RequestException as e: return f"❌ Mistral Network Error: {e}" except (KeyError, IndexError): return "❌ Mistral API Error: Unexpected response format." except Exception as e: return f"❌ Mistral Error: {e}" def generate_diagram_image(prompt: str) -> Optional[str]: try: response = requests.post( FLUX_MODEL_API, headers={"Authorization": f"Bearer {HF_TOKEN}"}, json={"inputs": prompt}, timeout=60 ) response.raise_for_status() image = Image.open(io.BytesIO(response.content)) with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file: image.save(temp_file.name, "PNG") return temp_file.name except requests.exceptions.RequestException as e: print(f"❌ FLUX Network Error: {e}") return None except Exception as e: print(f"❌ FLUX Error: {e}") return None def gemini_explain_file(file, question: Optional[str] = None) -> str: if not file: return "⚠️ No file uploaded." try: file_path = file if isinstance(file, str) else file.name if file_path.lower().endswith((".png", ".jpg", ".jpeg")): img = Image.open(file_path) prompt = f"Explain the science in this image. If there's a specific question, address it: {question}" if question else "Explain the science in this image." response = gemini_model.generate_content([prompt, img]) return response.text elif file_path.lower().endswith(".pdf"): with pdfplumber.open(file_path) as pdf: text = "\n".join(page.extract_text() or "" for page in pdf.pages) prompt = f"Explain the science in this PDF, focusing on this question: {question}\n\nPDF Content:\n{text[:PDF_TEXT_LIMIT]}" if question else f"Summarize and explain the science in this PDF:\n\n{text[:PDF_TEXT_LIMIT]}" response = gemini_model.generate_content(prompt) return response.text else: return "⚠️ Unsupported file type." except Exception as e: return f"❌ Gemini Error: {e}" # ───────────────────────────────────────────── # Feature Functions # ───────────────────────────────────────────── def generate_quiz(explanation_text: str) -> str: """Generates a 2-question quiz based on the explanation.""" if not explanation_text or "❌" in explanation_text or "⚠️" in explanation_text: return "Quiz could not be generated based on the explanation." # Prompt to ensure clear formatting for parsing, including the marker system_message = "You are a quiz creator. Based on the provided text, create a simple 2-question multiple-choice quiz. For each question, provide 3-4 options. Clearly indicate the correct answer by putting '(Correct answer)' immediately after it. Format each question as follows: 'Question #: [Question Text]\nA) Option A\nB) Option B\nC) Option C (Correct answer)\nD) Option D'. Put a blank line between questions. Do not include introductory or concluding remarks or explanations for the answers." prompt = f"Create a 2-question multiple-choice quiz from this explanation:\n\n{explanation_text[:PDF_TEXT_LIMIT*2]}" quiz_result = generate_mistral_response(prompt, system_message) if "❌" in quiz_result: return f"Could not generate quiz: {quiz_result}" return quiz_result def parse_quiz_text_for_dropdown(quiz_text: str) -> Tuple[List[Dict[str, any]], List[str]]: """ Parses AI quiz text. Returns a list of question data (label, choices) and a list of *clean* correct answers (without the marker) for state. """ questions_data = [] correct_answers_clean = [] # To store correct answers without marker question_blocks = quiz_text.strip().split('\n\n') for raw_block in question_blocks[:2]: # Process up to 2 questions if not raw_block.strip(): continue lines = raw_block.strip().split('\n') if not lines: continue q_text_line = lines[0].strip() q_text_match = re.match(r'^\s*Question\s*\d+:\s*(.*)$', q_text_line, flags=re.IGNORECASE) q_text = q_text_match.group(1).strip() if q_text_match else q_text_line options_for_dropdown = [] # Options without the marker correct_option_clean = None # Clean correct answer for state for line in lines[1:]: line = line.strip() if not line: continue # Check if it looks like an option line (starts with A), capture text before marker option_match = re.match(r'^[A-Z]\)\s*(.*?)(?:\s*\(Correct answer\))?\s*$', line, flags=re.IGNORECASE) if option_match: option_text_clean = option_match.group(1).strip() options_for_dropdown.append(option_text_clean) # Add clean text to dropdown options # Check if this is the correct answer if "(Correct answer)" in line: correct_option_clean = option_text_clean # Store the clean correct answer if q_text and options_for_dropdown: questions_data.append({ "label": q_text, "choices": options_for_dropdown # Use the cleaned options for dropdown }) # Store the clean correct answer, ensure it corresponds to this question correct_answers_clean.append(correct_option_clean if correct_option_clean else "No correct answer found") else: print(f"Warning: Could not fully parse quiz block for dropdowns: {raw_block[:100]}...") correct_answers_clean.append("Error parsing question") # Add placeholder # Ensure we always return exactly two correct answers (fill with None if fewer than 2 questions) while len(correct_answers_clean) < 2: correct_answers_clean.append(None) return questions_data, correct_answers_clean[:2] # Return parsed data and the list of correct answers # CHANGED: Grade quiz function takes user answers and state values def grade_quiz(q1_answer_user: str, q2_answer_user: str, correct_q1_stored: str, correct_q2_stored: str) -> Tuple[str, gr.update]: """Grades the user's answers based on the stored correct answers.""" feedback_lines = ["### Quiz Feedback"] total_correct = 0 graded_count = 0 # Grade Question 1 if correct_q1_stored is not None and "Error" not in str(correct_q1_stored): graded_count += 1 if q1_answer_user is None: feedback_lines.append(f"⚠️ Question 1: No answer selected. Correct was: '{correct_q1_stored}'") elif q1_answer_user == correct_q1_stored: feedback_lines.append(f"✅ Question 1: Correct!") total_correct += 1 else: feedback_lines.append(f"❌ Question 1: Incorrect. Your answer: '{q1_answer_user}'. Correct was: '{correct_q1_stored}'") elif q1_answer_user: # User selected an answer but question/correct answer wasn't parsed feedback_lines.append(f"⚠️ Question 1: Answer selected, but correct answer could not be determined.") # Grade Question 2 if correct_q2_stored is not None and "Error" not in str(correct_q2_stored): graded_count += 1 if q2_answer_user is None: feedback_lines.append(f"⚠️ Question 2: No answer selected. Correct was: '{correct_q2_stored}'") elif q2_answer_user == correct_q2_stored: feedback_lines.append(f"✅ Question 2: Correct!") total_correct += 1 else: feedback_lines.append(f"❌ Question 2: Incorrect. Your answer: '{q2_answer_user}'. Correct was: '{correct_q2_stored}'") elif q2_answer_user: # User selected an answer but question/correct answer wasn't parsed feedback_lines.append(f"⚠️ Question 2: Answer selected, but correct answer could not be determined.") # Overall score if graded_count > 0: score_message = f"Overall Score: {total_correct}/{graded_count}." feedback_lines.append(score_message) # Determine feedback box class based on score if total_correct == graded_count: feedback_css_class = "correct" elif total_correct > 0: feedback_css_class = "partial" # Using 'partial' class if you add one in CSS else: feedback_css_class = "incorrect" else: feedback_lines.append("No quiz questions were available to grade.") feedback_css_class = "" # No specific class if nothing graded feedback_text = "\n".join(feedback_lines) # Update feedback box visibility and value feedback_update = gr.update(value=feedback_text, visible=True, elem_classes=[feedback_css_class]) return feedback_update def create_report(report_format: str, explanation: str, image_path: Optional[str], raw_quiz_text: str) -> Optional[str]: """Creates a downloadable report in PDF or Markdown format.""" if not explanation: return None if report_format == "PDF": pdf = FPDF() pdf.add_page() pdf.set_font("Helvetica", 'B', 16) pdf.cell(0, 10, 'Science Report', 0, 1, 'C') pdf.ln(10) pdf.set_font("Helvetica", 'B', 14) pdf.cell(0, 10, 'Explanation', 0, 1) pdf.set_font("Helvetica", '', 12) try: pdf.multi_cell(0, 10, explanation.encode('latin-1', 'replace').decode('latin-1')) except Exception as e: print(f"PDF Explanation Encoding Error: {e}") pdf.multi_cell(0, 10, "Error encoding explanation text for PDF.") # Fallback pdf.ln(5) if image_path and os.path.exists(image_path): pdf.set_font("Helvetica", 'B', 14) pdf.cell(0, 10, 'Generated Diagram', 0, 1) pdf.ln(5) try: available_width = pdf.w - pdf.l_margin - pdf.r_margin pdf.image(image_path, x=pdf.get_x(), y=pdf.get_y(), w=available_width) # Move cursor down past where image would be (approximate height based on width) try: img_w, img_h = Image.open(image_path).size img_display_height = (img_h / img_w) * available_width pdf.ln(min(img_display_height, pdf.h - pdf.get_y() - pdf.b_margin) + 5) # Add space after image, don't go past page bottom except Exception as img_size_e: print(f"Could not get image size for PDF spacing: {img_size_e}") pdf.ln(100) # Fallback fixed spacing if size fails except Exception as e: print(f"PDF Image Error: {e}") pdf.ln(15) # Add some space even if image failed pdf.set_font("Helvetica", 'B', 14) pdf.cell(0, 10, 'Quiz', 0, 1) pdf.set_font("Helvetica", '', 12) # Use the raw quiz text for the report as it contains correct answers if raw_quiz_text and "Quiz could not be generated" not in raw_quiz_text and "❌" not in raw_quiz_text and "⚠️" not in raw_quiz_text: try: pdf.multi_cell(0, 10, raw_quiz_text.encode('latin-1', 'replace').decode('latin-1')) except Exception as e: print(f"PDF Quiz Encoding Error: {e}") pdf.multi_cell(0, 10, "Error encoding quiz text for PDF.") else: pdf.multi_cell(0, 10, "Quiz was not available.") pdf.ln(5) with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file: pdf.output(temp_file.name) return temp_file.name elif report_format == "Markdown": content = f"# Science Report\n\n## Explanation\n\n{explanation}\n\n" if image_path: content += "## Diagram\n\n(Note: The diagram image is a separate file.)\n\n" content += f"## Quiz\n\n{raw_quiz_text}\n" # Use raw text for Markdown too with tempfile.NamedTemporaryFile(delete=False, suffix=".md", mode='w', encoding='utf-8') as temp_file: temp_file.write(content) return temp_file.name return None # ───────────────────────────────────────────── # Main Handler # ───────────────────────────────────────────── # This function updates ALL relevant output components and state variables. # The order of returned values MUST match the outputs list in submit_btn.click def handle_combined_input(question, level, uploaded_file): explanation = "" image_path = None # This holds the temporary file path raw_quiz_text = "Your quiz will appear here..." # Store raw AI output for quiz reasoning = "" download_box_visible = gr.update(visible=False) # Assume hidden initially diagram_visible = gr.update(visible=False) # Hide diagram by default # --- Generation Logic --- if uploaded_file: explanation = gemini_explain_file(uploaded_file, question) reasoning = "🧠 Analysis powered by Google Gemini." image_path = None # No diagram for file upload diagram_visible = gr.update(visible=False) elif question and question.strip(): prompt = LEVEL_PROMPTS.get(level, "") + question explanation = generate_mistral_response(prompt, "You are a helpful science explainer.") reasoning = "🧠 Explanation powered by Mistral via OpenRouter." if "❌" not in explanation and "⚠️" not in explanation: # Only try image if explanation worked image_path = generate_diagram_image(question) if image_path: reasoning += "\n🎨 Diagram generated by FLUX via Hugging Face." diagram_visible = gr.update(visible=True) # Show diagram if successful else: reasoning += "\n❌ Diagram generation failed." diagram_visible = gr.update(visible=False) # Hide if failed else: image_path = None # If explanation failed, no image is generated diagram_visible = gr.update(visible=False) else: explanation = "⚠️ Please ask a science question or upload a file." reasoning = "" # Clear reasoning if no input image_path = None diagram_visible = gr.update(visible=False) # Generate quiz only if explanation was successful and not empty/warning parsed_quiz_data = [] # Initialize parsed data and correct answers correct_answers = [None, None] # Initialize state values if explanation.strip() and "❌" not in explanation and "⚠️" not in explanation: raw_quiz_text = generate_quiz(explanation) # Parse quiz text for dropdowns and get correct answers for state parsed_quiz_data, correct_answers = parse_quiz_text_for_dropdown(raw_quiz_text) # Only show download box if explanation AND quiz generation seem OK if "❌" not in raw_quiz_text and "⚠️" not in raw_quiz_text and raw_quiz_text.strip() and "Quiz could not be generated" not in raw_quiz_text: download_box_visible = gr.update(visible=True) # If quiz generation failed, raw_quiz_text will contain the error message # --- Prepare UI Updates --- # Updates for Quiz Question 1 Dropdown if len(parsed_quiz_data) > 0: q1_label = parsed_quiz_data[0]["label"] q1_choices = parsed_quiz_data[0]["choices"] q1_update = gr.update( label=f"Question 1: {q1_label}", # Add "Question 1:" prefix choices=q1_choices, value=None, # Reset value visible=True # Make visible ) else: # If no Q1, hide the dropdown q1_update = gr.update(label="Quiz Question 1", choices=[], value=None, visible=False) # Updates for Quiz Question 2 Dropdown if len(parsed_quiz_data) > 1: q2_label = parsed_quiz_data[1]["label"] q2_choices = parsed_quiz_data[1]["choices"] q2_update = gr.update( label=f"Question 2: {q2_label}", # Add "Question 2:" prefix choices=q2_choices, value=None, # Reset value visible=True # Make visible ) else: # If no Q2, hide the dropdown q2_update = gr.update(label="Quiz Question 2", choices=[], value=None, visible=False) # Update the raw quiz markdown text display # Only show the raw text if quiz generation provided some content if raw_quiz_text.strip() and "Your quiz will appear here" not in raw_quiz_text: raw_quiz_markdown_update = gr.update(value=f"**Raw Quiz Output:**\n\n{raw_quiz_text}", visible=True) else: raw_quiz_markdown_update = gr.update(value="", visible=False) # Hide quiz feedback initially or if quiz generation failed quiz_feedback_update = gr.update(value="", visible=False, elem_classes="") # Also clear feedback class # Show submit button only if at least one question was parsed submit_quiz_btn_update = gr.update(visible= True if len(parsed_quiz_data) > 0 else False) # Return all the updated components' values/updates # Order MUST match the outputs list in submit_btn.click return (explanation, # explanation_out image_path, # diagram_out (path to temp file) diagram_visible, # diagram_out visibility update reasoning, # reasoning_out q1_update, # quiz_q1 dropdown update q2_update, # quiz_q2 dropdown update raw_quiz_markdown_update, # raw_quiz_markdown update download_box_visible, # download_box visibility update submit_quiz_btn_update, # submit_quiz_btn visibility update quiz_feedback_update, # quiz_feedback visibility/value update correct_answers[0], # correct_q1_state (value update) correct_answers[1] # correct_q2_state (value update) ) # ───────────────────────────────────────────── # Launch Gradio App # ───────────────────────────────────────────── def launch_app(): # Use the combined CSS with gr.Blocks(theme=gr.themes.Soft(), css=APP_CSS) as demo: gr.HTML(LOTTIE_ANIMATION_HTML) gr.Markdown("# 📚 ExplainAnything.AI\nYour personal AI-powered science tutor.") # Hidden state variables to store correct answers correct_q1_state = gr.State(value=None) correct_q2_state = gr.State(value=None) with gr.Row(): with gr.Column(scale=1): question = gr.Textbox(label="Ask a science question", placeholder="e.g., How does photosynthesis work?", info="Ask any science-related question.") level = gr.Radio(choices=["Kid", "Beginner", "Advanced"], value="Beginner", label="Explanation Level", info="Choose how detailed the explanation should be.") uploaded_file = gr.File(label="Upload Image/PDF", file_types=["image", ".pdf"]) with gr.Row(): toggle_theme_btn = gr.Button("Toggle Theme 🌒/☀️") submit_btn = gr.Button("Generate Explanation", variant="primary") with gr.Column(scale=2): # Use Accordions - added elem_classes for styling with gr.Accordion("📘 Explanation", open=True, elem_classes="secondary"): explanation_out = gr.Textbox(lines=8, label="Explanation", interactive=False) reasoning_out = gr.Textbox(lines=2, label="🧠 Processing Details", interactive=False) with gr.Accordion("🖼️ Diagram", open=False, elem_classes="secondary"): diagram_out = gr.Image(label="Generated Diagram", interactive=False, show_label=True, visible=False) with gr.Accordion("🧪 Quiz", open=False, elem_classes="secondary") as quiz_accordion: gr.Markdown("Test Your Knowledge:") # Pre-defined Dropdown menus for Quiz Questions (up to 2 questions) quiz_q1 = gr.Dropdown(label="Quiz Question 1", choices=[], value=None, interactive=True, visible=False) quiz_q2 = gr.Dropdown(label="Quiz Question 2", choices=[], value=None, interactive=True, visible=False) # Raw quiz text output - changed from Markdown to Textbox as in your provided code raw_quiz_markdown = gr.Markdown(value="Your quiz will appear here...", label="Raw Quiz Data", visible=False) # Switched back to Markdown submit_quiz_btn = gr.Button("Submit Answers", variant="secondary", visible=False) # Feedback textbox - initially hidden, given an ID for CSS styling quiz_feedback = gr.Textbox(label="Quiz Feedback", lines=3, interactive=False, visible=False, elem_id="quiz_feedback_box") with gr.Group(visible=False) as download_box: gr.Markdown("### 📥 Download Report") with gr.Row(): report_format = gr.Radio(["PDF", "Markdown"], label="Choose Format", value="PDF") download_btn = gr.Button("Download") download_file = gr.File(label="Your report is ready to download", interactive=False) # --- Event Handlers --- # Inputs for handle_combined_input: question, level, uploaded_file # Outputs: explanation_out, diagram_out (value), diagram_out (visibility), # reasoning_out (value), quiz_q1 (update), quiz_q2 (update), # raw_quiz_markdown (update), download_box (visibility), # submit_quiz_btn (visibility), quiz_feedback (visibility), # correct_q1_state (update), correct_q2_state (update) # The order here MUST match the return order in handle_combined_input submit_btn.click( fn=handle_combined_input, inputs=[question, level, uploaded_file], outputs=[explanation_out, diagram_out, diagram_out, # Diagram value and visibility reasoning_out, quiz_q1, quiz_q2, # Dropdown updates raw_quiz_markdown, # Raw text update download_box, # Download box visibility submit_quiz_btn, # Submit quiz button visibility quiz_feedback, # Feedback box visibility/value correct_q1_state, # State update for correct answer 1 correct_q2_state # State update for correct answer 2 ] ) # Handle quiz submission # Inputs: Current value of quiz_q1, quiz_q2, and the state variables holding correct answers # Output: Update the quiz_feedback textbox submit_quiz_btn.click( fn=grade_quiz, inputs=[quiz_q1, quiz_q2, correct_q1_state, correct_q2_state], # Get selected answers AND stored correct answers outputs=[quiz_feedback] # Update the feedback textbox ) toggle_theme_btn.click( fn=None, inputs=None, outputs=None, js="() => { document.body.classList.toggle('dark'); }" ) # Inputs for create_report need the CURRENT values from the outputs AFTER submit is run # Inputs: report_format (from radio), explanation (from explanation_out), # image_path (from diagram_out), raw_quiz_text (from raw_quiz_markdown) # Output: download_file (the downloadable file) # Note: We pass the component objects themselves, Gradio gets their current VALUE download_btn.click( fn=create_report, inputs=[report_format, explanation_out, diagram_out, raw_quiz_markdown], outputs=[download_file] ) print("Launching Gradio app with a public link...") demo.launch(share=True, debug=True) # ───────────────────────────────────────────── # Run App # ───────────────────────────────────────────── if __name__ == "__main__": launch_app()