"""ExplainAnything.AI: a Gradio science tutor.

Explains questions and uploaded images/PDFs (Mistral via OpenRouter, Google Gemini),
illustrates answers with FLUX-generated diagrams (Hugging Face), and builds a short
multiple-choice quiz plus a downloadable report.
"""

import io
import os
import requests
from PIL import Image
from typing import Any, Optional, Tuple, List, Dict
import gradio as gr
from dotenv import load_dotenv
import google.generativeai as genai
import pdfplumber
import tempfile
from fpdf import FPDF
import re


load_dotenv()

OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
HF_TOKEN = os.getenv("HF_TOKEN")
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

MISTRAL_MODEL = "mistralai/mistral-7b-instruct"
FLUX_MODEL_API = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
GEMINI_MODEL_NAME = "gemini-1.5-flash"
PDF_TEXT_LIMIT = 8000  # max characters of extracted PDF text sent to the models

if not all([OPENROUTER_API_KEY, HF_TOKEN, GEMINI_API_KEY]):
    raise ValueError("❌ Missing one or more required environment variables.")
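
# The keys are read from the environment; a local .env file works via load_dotenv().
# An illustrative .env layout (values are placeholders):
#
#   OPENROUTER_API_KEY=...
#   HF_TOKEN=...
#   GEMINI_API_KEY=...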


genai.configure(api_key=GEMINI_API_KEY)
gemini_model = genai.GenerativeModel(GEMINI_MODEL_NAME)


LEVEL_PROMPTS = {
    "Kid": "Explain like I'm 7 years old: ",
    "Beginner": "Explain in simple terms: ",
    "Advanced": "Explain in technical detail: "
}


LOTTIE_ANIMATION_HTML = """
<div style="text-align: center; margin-bottom: 20px;">
  <script src="https://unpkg.com/@lottiefiles/lottie-player@latest/dist/lottie-player.js"></script>
  <lottie-player src="https://assets5.lottiefiles.com/packages/lf20_M9p23l.json"
      background="transparent" speed="1" style="width: 200px; height: 200px; margin: auto;"
      loop autoplay></lottie-player>
</div>
"""

APP_CSS = """
body.dark {
    --body-background-fill: #121212;
    --background-fill-primary: #1E1E1E;
    --background-fill-secondary: #2C2C2C;
    --text-color-primary: #FFFFFF;
    --text-color-secondary: #E0E0E0;
    --border-color-primary: #333333;
    --input-background-fill: #2C2C2C;
    --button-secondary-background-fill: #333333;
    --button-secondary-text-color: #FFFFFF;
    --button-primary-background-fill: #6d28d9; /* Purple */
    --button-primary-border-color: #6d28d9; /* Purple */
    --button-primary-text-color: #FFFFFF;
}
.dark .gradio-container { background: var(--body-background-fill); }
.dark .gradio-tabs-nav { background: var(--background-fill-secondary); }

/* Button hover effect */
button:hover {
    transition: all 0.3s ease;
    transform: scale(1.01);
    box-shadow: 0px 0px 8px rgba(255, 255, 255, 0.1);
}

/* Accordions: adjust background and text color in both modes */
.gradio-accordion.secondary > .label {
    background-color: var(--background-fill-primary); /* Primary background for the label */
    color: var(--text-color-primary);
    border-color: var(--border-color-primary);
}
.gradio-accordion.secondary > .label:hover {
    background-color: var(--background-fill-secondary); /* Slight change on hover */
}
.gradio-accordion.secondary > .label > .icon {
    color: var(--text-color-primary);
}
.gradio-accordion.secondary > .panel {
    background-color: var(--background-fill-secondary); /* Secondary background for panel content */
    border-color: var(--border-color-primary);
}

/* Dropdowns inside accordions */
.gradio-accordion .gr-form { /* Forms/inputs inside accordion panels */
    background-color: var(--background-fill-secondary);
}
.gradio-accordion .gr-dropdown-value {
    color: var(--text-color-primary);
}
.gradio-accordion .gr-dropdown-options {
    background-color: var(--background-fill-secondary);
    border-color: var(--border-color-primary);
}
.gradio-accordion .gr-dropdown-options .gr-dropdown-option {
    color: var(--text-color-primary);
}
.gradio-accordion .gr-dropdown-options .gr-dropdown-option:hover {
    background-color: var(--background-fill-primary);
}

/* Quiz feedback box */
#quiz_feedback_box {
    margin-top: 15px;
    padding: 10px;
    border: 1px solid var(--border-color-primary);
    border-radius: 5px;
}
#quiz_feedback_box.correct {
    border-color: green;
    color: green;
}
/* "partial" is applied by grade_quiz() when only some answers are right */
#quiz_feedback_box.partial {
    border-color: orange;
    color: orange;
}
#quiz_feedback_box.incorrect {
    border-color: red;
    color: red;
}
"""


def generate_mistral_response(prompt: str, system_message: str) -> str:
    """Sends a chat-completion request to Mistral via OpenRouter and returns the reply text
    (or an error string prefixed with ❌)."""
    headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
    payload = {
        "model": MISTRAL_MODEL,
        "messages": [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.7
    }
    try:
        response = requests.post(OPENROUTER_API_URL, headers=headers, json=payload, timeout=45)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"].strip()
    except requests.exceptions.RequestException as e:
        return f"❌ Mistral Network Error: {e}"
    except (KeyError, IndexError):
        return "❌ Mistral API Error: Unexpected response format."
    except Exception as e:
        return f"❌ Mistral Error: {e}"


def generate_diagram_image(prompt: str) -> Optional[str]:
    """Generates an illustrative image with FLUX via the Hugging Face Inference API.

    Returns the path of a temporary PNG file, or None on failure."""
    try:
        response = requests.post(
            FLUX_MODEL_API,
            headers={"Authorization": f"Bearer {HF_TOKEN}"},
            json={"inputs": prompt},
            timeout=60
        )
        response.raise_for_status()
        image = Image.open(io.BytesIO(response.content))
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
            image.save(temp_file.name, "PNG")
            return temp_file.name
    except requests.exceptions.RequestException as e:
        print(f"❌ FLUX Network Error: {e}")
        return None
    except Exception as e:
        print(f"❌ FLUX Error: {e}")
        return None
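
# Illustrative call (the prompt here is hypothetical):
#   generate_diagram_image("photosynthesis in a leaf")
# returns a temporary PNG path on success, or None if the request fails.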


def gemini_explain_file(file, question: Optional[str] = None) -> str:
    """Explains an uploaded image or PDF with Gemini, optionally focused on a specific question."""
    if not file:
        return "⚠️ No file uploaded."
    try:
        file_path = file if isinstance(file, str) else file.name

        if file_path.lower().endswith((".png", ".jpg", ".jpeg")):
            img = Image.open(file_path)
            prompt = (
                f"Explain the science in this image. If there's a specific question, address it: {question}"
                if question else "Explain the science in this image."
            )
            response = gemini_model.generate_content([prompt, img])
            return response.text
        elif file_path.lower().endswith(".pdf"):
            with pdfplumber.open(file_path) as pdf:
                text = "\n".join(page.extract_text() or "" for page in pdf.pages)
            prompt = (
                f"Explain the science in this PDF, focusing on this question: {question}\n\nPDF Content:\n{text[:PDF_TEXT_LIMIT]}"
                if question else
                f"Summarize and explain the science in this PDF:\n\n{text[:PDF_TEXT_LIMIT]}"
            )
            response = gemini_model.generate_content(prompt)
            return response.text
        else:
            return "⚠️ Unsupported file type."
    except Exception as e:
        return f"❌ Gemini Error: {e}"


def generate_quiz(explanation_text: str) -> str:
    """Generates a 2-question quiz based on the explanation."""
    if not explanation_text or "❌" in explanation_text or "⚠️" in explanation_text:
        return "Quiz could not be generated based on the explanation."

    system_message = (
        "You are a quiz creator. Based on the provided text, create a simple 2-question "
        "multiple-choice quiz. For each question, provide 3-4 options. Clearly indicate the "
        "correct answer by putting '(Correct answer)' immediately after it. Format each question "
        "as follows: 'Question #: [Question Text]\nA) Option A\nB) Option B\nC) Option C (Correct answer)\nD) Option D'. "
        "Put a blank line between questions. Do not include introductory or concluding remarks "
        "or explanations for the answers."
    )
    prompt = f"Create a 2-question multiple-choice quiz from this explanation:\n\n{explanation_text[:PDF_TEXT_LIMIT * 2]}"
    quiz_result = generate_mistral_response(prompt, system_message)

    if "❌" in quiz_result:
        return f"Could not generate quiz: {quiz_result}"
    return quiz_result


def parse_quiz_text_for_dropdown(quiz_text: str) -> Tuple[List[Dict[str, Any]], List[Optional[str]]]:
    """
    Parses the AI-generated quiz text. Returns a list of question data (label, choices)
    and a list of *clean* correct answers (without the marker) for state.
    """
    questions_data = []
    correct_answers_clean = []

    question_blocks = quiz_text.strip().split('\n\n')

    for raw_block in question_blocks[:2]:
        if not raw_block.strip():
            continue

        lines = raw_block.strip().split('\n')
        if not lines:
            continue

        q_text_line = lines[0].strip()
        q_text_match = re.match(r'^\s*Question\s*\d+:\s*(.*)$', q_text_line, flags=re.IGNORECASE)
        q_text = q_text_match.group(1).strip() if q_text_match else q_text_line

        options_for_dropdown = []
        correct_option_clean = None

        for line in lines[1:]:
            line = line.strip()
            if not line:
                continue

            option_match = re.match(r'^[A-Z]\)\s*(.*?)(?:\s*\(Correct answer\))?\s*$', line, flags=re.IGNORECASE)
            if option_match:
                option_text_clean = option_match.group(1).strip()
                options_for_dropdown.append(option_text_clean)

                if "(Correct answer)" in line:
                    correct_option_clean = option_text_clean

        if q_text and options_for_dropdown:
            questions_data.append({
                "label": q_text,
                "choices": options_for_dropdown
            })
            correct_answers_clean.append(correct_option_clean if correct_option_clean else "No correct answer found")
        else:
            print(f"Warning: Could not fully parse quiz block for dropdowns: {raw_block[:100]}...")
            correct_answers_clean.append("Error parsing question")

    # Pad so callers can always unpack exactly two answers.
    while len(correct_answers_clean) < 2:
        correct_answers_clean.append(None)

    return questions_data, correct_answers_clean[:2]
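
# The parser above expects the block format requested in generate_quiz(); an
# illustrative (hypothetical) example of the text it handles:
#
#   Question 1: What gas do plants absorb during photosynthesis?
#   A) Oxygen
#   B) Carbon dioxide (Correct answer)
#   C) Nitrogen
#
#   Question 2: ...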


def grade_quiz(q1_answer_user: Optional[str], q2_answer_user: Optional[str],
               correct_q1_stored: Optional[str], correct_q2_stored: Optional[str]) -> dict:
    """Grades the user's answers against the stored correct answers and returns a gr.update
    for the feedback box."""
    feedback_lines = ["### Quiz Feedback"]
    total_correct = 0
    graded_count = 0

    if correct_q1_stored is not None and "Error" not in str(correct_q1_stored):
        graded_count += 1
        if q1_answer_user is None:
            feedback_lines.append(f"⚠️ Question 1: No answer selected. Correct was: '{correct_q1_stored}'")
        elif q1_answer_user == correct_q1_stored:
            feedback_lines.append("✅ Question 1: Correct!")
            total_correct += 1
        else:
            feedback_lines.append(f"❌ Question 1: Incorrect. Your answer: '{q1_answer_user}'. Correct was: '{correct_q1_stored}'")
    elif q1_answer_user:
        feedback_lines.append("⚠️ Question 1: Answer selected, but correct answer could not be determined.")

    if correct_q2_stored is not None and "Error" not in str(correct_q2_stored):
        graded_count += 1
        if q2_answer_user is None:
            feedback_lines.append(f"⚠️ Question 2: No answer selected. Correct was: '{correct_q2_stored}'")
        elif q2_answer_user == correct_q2_stored:
            feedback_lines.append("✅ Question 2: Correct!")
            total_correct += 1
        else:
            feedback_lines.append(f"❌ Question 2: Incorrect. Your answer: '{q2_answer_user}'. Correct was: '{correct_q2_stored}'")
    elif q2_answer_user:
        feedback_lines.append("⚠️ Question 2: Answer selected, but correct answer could not be determined.")

    if graded_count > 0:
        feedback_lines.append(f"Overall Score: {total_correct}/{graded_count}.")

        if total_correct == graded_count:
            feedback_css_class = "correct"
        elif total_correct > 0:
            feedback_css_class = "partial"
        else:
            feedback_css_class = "incorrect"
    else:
        feedback_lines.append("No quiz questions were available to grade.")
        feedback_css_class = ""

    feedback_text = "\n".join(feedback_lines)

    feedback_update = gr.update(value=feedback_text, visible=True, elem_classes=[feedback_css_class])

    return feedback_update


def create_report(report_format: str, explanation: str, image_path: Optional[str], raw_quiz_text: str) -> Optional[str]:
    """Creates a downloadable report in PDF or Markdown format."""
    if not explanation:
        return None

    if report_format == "PDF":
        pdf = FPDF()
        pdf.add_page()
        pdf.set_font("Helvetica", 'B', 16)
        pdf.cell(0, 10, 'Science Report', 0, 1, 'C')
        pdf.ln(10)

        pdf.set_font("Helvetica", 'B', 14)
        pdf.cell(0, 10, 'Explanation', 0, 1)
        pdf.set_font("Helvetica", '', 12)
        try:
            pdf.multi_cell(0, 10, explanation.encode('latin-1', 'replace').decode('latin-1'))
        except Exception as e:
            print(f"PDF Explanation Encoding Error: {e}")
            pdf.multi_cell(0, 10, "Error encoding explanation text for PDF.")
        pdf.ln(5)

        if image_path and os.path.exists(image_path):
            pdf.set_font("Helvetica", 'B', 14)
            pdf.cell(0, 10, 'Generated Diagram', 0, 1)
            pdf.ln(5)
            try:
                available_width = pdf.w - pdf.l_margin - pdf.r_margin
                pdf.image(image_path, x=pdf.get_x(), y=pdf.get_y(), w=available_width)

                try:
                    img_w, img_h = Image.open(image_path).size
                    img_display_height = (img_h / img_w) * available_width
                    pdf.ln(min(img_display_height, pdf.h - pdf.get_y() - pdf.b_margin) + 5)
                except Exception as img_size_e:
                    print(f"Could not get image size for PDF spacing: {img_size_e}")
                    pdf.ln(100)

            except Exception as e:
                print(f"PDF Image Error: {e}")
                pdf.ln(15)

        pdf.set_font("Helvetica", 'B', 14)
        pdf.cell(0, 10, 'Quiz', 0, 1)
        pdf.set_font("Helvetica", '', 12)

        if raw_quiz_text and "Quiz could not be generated" not in raw_quiz_text and "❌" not in raw_quiz_text and "⚠️" not in raw_quiz_text:
            try:
                pdf.multi_cell(0, 10, raw_quiz_text.encode('latin-1', 'replace').decode('latin-1'))
            except Exception as e:
                print(f"PDF Quiz Encoding Error: {e}")
                pdf.multi_cell(0, 10, "Error encoding quiz text for PDF.")
        else:
            pdf.multi_cell(0, 10, "Quiz was not available.")
        pdf.ln(5)

        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
            pdf.output(temp_file.name)
            return temp_file.name

    elif report_format == "Markdown":
        content = f"# Science Report\n\n## Explanation\n\n{explanation}\n\n"
        if image_path:
            content += "## Diagram\n\n(Note: The diagram image is a separate file.)\n\n"
        content += f"## Quiz\n\n{raw_quiz_text}\n"

        with tempfile.NamedTemporaryFile(delete=False, suffix=".md", mode='w', encoding='utf-8') as temp_file:
            temp_file.write(content)
            return temp_file.name

    return None


def handle_combined_input(question, level, uploaded_file):
    """Routes the input to the right model, then builds the explanation, diagram, and quiz outputs."""
    explanation = ""
    image_path = None
    raw_quiz_text = "Your quiz will appear here..."
    reasoning = ""
    download_box_visible = gr.update(visible=False)
    diagram_visible = gr.update(visible=False)

    if uploaded_file:
        explanation = gemini_explain_file(uploaded_file, question)
        reasoning = "🧠 Analysis powered by Google Gemini."
        image_path = None
        diagram_visible = gr.update(visible=False)
    elif question and question.strip():
        prompt = LEVEL_PROMPTS.get(level, "") + question
        explanation = generate_mistral_response(prompt, "You are a helpful science explainer.")
        reasoning = "🧠 Explanation powered by Mistral via OpenRouter."
        if "❌" not in explanation and "⚠️" not in explanation:
            image_path = generate_diagram_image(question)
            if image_path:
                reasoning += "\n🎨 Diagram generated by FLUX via Hugging Face."
                diagram_visible = gr.update(visible=True)
            else:
                reasoning += "\n❌ Diagram generation failed."
                diagram_visible = gr.update(visible=False)
        else:
            image_path = None
            diagram_visible = gr.update(visible=False)
    else:
        explanation = "⚠️ Please ask a science question or upload a file."
        reasoning = ""
        image_path = None
        diagram_visible = gr.update(visible=False)

    parsed_quiz_data = []
    correct_answers = [None, None]

    if explanation.strip() and "❌" not in explanation and "⚠️" not in explanation:
        raw_quiz_text = generate_quiz(explanation)
        parsed_quiz_data, correct_answers = parse_quiz_text_for_dropdown(raw_quiz_text)

        if "❌" not in raw_quiz_text and "⚠️" not in raw_quiz_text and raw_quiz_text.strip() and "Quiz could not be generated" not in raw_quiz_text:
            download_box_visible = gr.update(visible=True)

    if len(parsed_quiz_data) > 0:
        q1_label = parsed_quiz_data[0]["label"]
        q1_choices = parsed_quiz_data[0]["choices"]
        q1_update = gr.update(
            label=f"Question 1: {q1_label}",
            choices=q1_choices,
            value=None,
            visible=True
        )
    else:
        q1_update = gr.update(label="Quiz Question 1", choices=[], value=None, visible=False)

    if len(parsed_quiz_data) > 1:
        q2_label = parsed_quiz_data[1]["label"]
        q2_choices = parsed_quiz_data[1]["choices"]
        q2_update = gr.update(
            label=f"Question 2: {q2_label}",
            choices=q2_choices,
            value=None,
            visible=True
        )
    else:
        q2_update = gr.update(label="Quiz Question 2", choices=[], value=None, visible=False)

    if raw_quiz_text.strip() and "Your quiz will appear here" not in raw_quiz_text:
        raw_quiz_markdown_update = gr.update(value=f"**Raw Quiz Output:**\n\n{raw_quiz_text}", visible=True)
    else:
        raw_quiz_markdown_update = gr.update(value="", visible=False)

    quiz_feedback_update = gr.update(value="", visible=False, elem_classes="")

    submit_quiz_btn_update = gr.update(visible=len(parsed_quiz_data) > 0)

    return (explanation,
            image_path,
            diagram_visible,
            reasoning,
            q1_update,
            q2_update,
            raw_quiz_markdown_update,
            download_box_visible,
            submit_quiz_btn_update,
            quiz_feedback_update,
            correct_answers[0],
            correct_answers[1]
            )
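
# The 12 values returned above map, in order, to the outputs wired up in launch_app():
# explanation_out, diagram_out (image value), diagram_out (visibility update),
# reasoning_out, quiz_q1, quiz_q2, raw_quiz_markdown, download_box,
# submit_quiz_btn, quiz_feedback, correct_q1_state, correct_q2_state.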


def launch_app():
    """Builds the Gradio UI and wires up the event handlers."""
    with gr.Blocks(theme=gr.themes.Soft(), css=APP_CSS) as demo:
        gr.HTML(LOTTIE_ANIMATION_HTML)
        gr.Markdown("# 🚀 ExplainAnything.AI\nYour personal AI-powered science tutor.")

        # Hidden state holding the correct answers for the current quiz.
        correct_q1_state = gr.State(value=None)
        correct_q2_state = gr.State(value=None)

        with gr.Row():
            with gr.Column(scale=1):
                question = gr.Textbox(label="Ask a science question", placeholder="e.g., How does photosynthesis work?", info="Ask any science-related question.")
                level = gr.Radio(choices=["Kid", "Beginner", "Advanced"], value="Beginner", label="Explanation Level", info="Choose how detailed the explanation should be.")
                uploaded_file = gr.File(label="Upload Image/PDF", file_types=["image", ".pdf"])

                with gr.Row():
                    toggle_theme_btn = gr.Button("Toggle Theme 🌙/☀️")
                    submit_btn = gr.Button("Generate Explanation", variant="primary")

            with gr.Column(scale=2):

                with gr.Accordion("📖 Explanation", open=True, elem_classes="secondary"):
                    explanation_out = gr.Textbox(lines=8, label="Explanation", interactive=False)
                    reasoning_out = gr.Textbox(lines=2, label="🧠 Processing Details", interactive=False)

                with gr.Accordion("🖼️ Diagram", open=False, elem_classes="secondary"):
                    # type="filepath" so this component yields a file path when used as an input to create_report().
                    diagram_out = gr.Image(label="Generated Diagram", type="filepath", interactive=False, show_label=True, visible=False)

                with gr.Accordion("🧪 Quiz", open=False, elem_classes="secondary") as quiz_accordion:
                    gr.Markdown("Test Your Knowledge:")

                    quiz_q1 = gr.Dropdown(label="Quiz Question 1", choices=[], value=None, interactive=True, visible=False)
                    quiz_q2 = gr.Dropdown(label="Quiz Question 2", choices=[], value=None, interactive=True, visible=False)

                    raw_quiz_markdown = gr.Markdown(value="Your quiz will appear here...", label="Raw Quiz Data", visible=False)
                    submit_quiz_btn = gr.Button("Submit Answers", variant="secondary", visible=False)

                    quiz_feedback = gr.Textbox(label="Quiz Feedback", lines=3, interactive=False, visible=False, elem_id="quiz_feedback_box")

                with gr.Group(visible=False) as download_box:
                    gr.Markdown("### 📥 Download Report")
                    with gr.Row():
                        report_format = gr.Radio(["PDF", "Markdown"], label="Choose Format", value="PDF")
                        download_btn = gr.Button("Download")
                    download_file = gr.File(label="Your report is ready to download", interactive=False)

        submit_btn.click(
            fn=handle_combined_input,
            inputs=[question, level, uploaded_file],
            outputs=[explanation_out,
                     diagram_out, diagram_out,  # receives the image value, then the visibility update
                     reasoning_out,
                     quiz_q1, quiz_q2,
                     raw_quiz_markdown,
                     download_box,
                     submit_quiz_btn,
                     quiz_feedback,
                     correct_q1_state,
                     correct_q2_state]
        )

        submit_quiz_btn.click(
            fn=grade_quiz,
            inputs=[quiz_q1, quiz_q2, correct_q1_state, correct_q2_state],
            outputs=[quiz_feedback]
        )

        toggle_theme_btn.click(
            fn=None, inputs=None, outputs=None,
            js="() => { document.body.classList.toggle('dark'); }"
        )

        download_btn.click(
            fn=create_report,
            inputs=[report_format, explanation_out, diagram_out, raw_quiz_markdown],
            outputs=[download_file]
        )

    print("Launching Gradio app with a public link...")
    demo.launch(share=True, debug=True)


if __name__ == "__main__":
    launch_app()