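"""Professor AI Feynman: a Gradio app that researches a topic with an AutoGen agent swarm,
generates lecture slides and narration scripts, and voices them with Coqui XTTS."""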
import os
import json
import re
import gradio as gr
import asyncio
import logging
import torch
import random
from serpapi import GoogleSearch
from pydantic import BaseModel
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination
from autogen_agentchat.teams import Swarm
from autogen_agentchat.ui import Console
from autogen_agentchat.messages import TextMessage, HandoffMessage, StructuredMessage
from autogen_ext.models.anthropic import AnthropicChatCompletionClient
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.models.ollama import OllamaChatCompletionClient
from markdown_pdf import MarkdownPdf, Section
import traceback
import soundfile as sf
import tempfile
from pydub import AudioSegment
from TTS.api import TTS
# Set up logging
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
handlers=[
logging.FileHandler("lecture_generation.log"),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
# Set up environment
# For Huggingface Spaces, use /tmp for temporary storage
if os.path.exists("/tmp"):
OUTPUT_DIR = "/tmp/outputs" # Use /tmp for Huggingface Spaces
else:
OUTPUT_DIR = os.path.join(os.getcwd(), "outputs") # Fallback for local dev
os.makedirs(OUTPUT_DIR, exist_ok=True)
logger.info(f"Using output directory: {OUTPUT_DIR}")
os.environ["COQUI_TOS_AGREED"] = "1"
# Define Pydantic model for slide data
class Slide(BaseModel):
title: str
content: str
class SlidesOutput(BaseModel):
slides: list[Slide]
# Define search_web tool using SerpApi
def search_web(query: str, serpapi_key: str) -> str:
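    """Search Google through SerpApi and return the top results as formatted text, or an error message."""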
try:
params = {
"q": query,
"engine": "google",
"api_key": serpapi_key,
"num": 5
}
search = GoogleSearch(params)
results = search.get_dict()
if "error" in results:
logger.error("SerpApi error: %s", results["error"])
return f"Error during search: {results['error']}"
if "organic_results" not in results or not results["organic_results"]:
logger.info("No search results found for query: %s", query)
return f"No results found for query: {query}"
formatted_results = []
for item in results["organic_results"][:5]:
title = item.get("title", "No title")
snippet = item.get("snippet", "No snippet")
link = item.get("link", "No link")
formatted_results.append(f"Title: {title}\nSnippet: {snippet}\nLink: {link}\n")
formatted_output = "\n".join(formatted_results)
logger.info("Successfully retrieved search results for query: %s", query)
return f"Search results for {query}:\n{formatted_output}"
except Exception as e:
logger.error("Unexpected error during search: %s", str(e))
return f"Unexpected error during search: {str(e)}"
# Define helper function for progress HTML
def html_with_progress(label, progress):
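    """Return an HTML block showing a progress bar at the given percentage with a status label."""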
return f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<div style="width: 100%; background-color: #FFFFFF; border-radius: 10px; overflow: hidden; margin-bottom: 20px;">
<div style="width: {progress}%; height: 30px; background-color: #4CAF50; border-radius: 10px;"></div>
</div>
<h2 style="font-style: italic; color: #555;">{label}</h2>
</div>
"""
# Function to get model client based on selected service
def get_model_client(service, api_key):
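    """Return the chat completion client matching the selected model service."""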
if service == "OpenAI-gpt-4o-2024-08-06":
return OpenAIChatCompletionClient(model="gpt-4o-2024-08-06", api_key=api_key)
elif service == "Anthropic-claude-3-sonnet-20240229":
return AnthropicChatCompletionClient(model="claude-3-sonnet-20240229", api_key=api_key)
elif service == "Google-gemini-1.5-flash":
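        # Gemini models go through Google's OpenAI-compatible endpoint via autogen's OpenAI client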
return OpenAIChatCompletionClient(model="gemini-1.5-flash", api_key=api_key)
elif service == "Ollama-llama3.2":
return OllamaChatCompletionClient(model="llama3.2")
else:
raise ValueError("Invalid service")
# Helper function to clean script text and make it natural
def clean_script_text(script):
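    """Strip slide headers and metadata from a narration script, normalize whitespace, and sprinkle in conversational filler words."""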
if not script or not isinstance(script, str):
logger.error("Invalid script input: %s", script)
return None
# Minimal cleaning to preserve natural language
script = re.sub(r"\*\*Slide \d+:.*?\*\*", "", script) # Remove slide headers
script = re.sub(r"\[.*?\]", "", script) # Remove bracketed content
script = re.sub(r"Title:.*?\n|Content:.*?\n", "", script) # Remove metadata
script = script.replace("humanlike", "human-like").replace("problemsolving", "problem-solving")
script = re.sub(r"\s+", " ", script).strip() # Normalize whitespace
# Convert bullet points to spoken cues
script = re.sub(r"^\s*-\s*", "So, ", script, flags=re.MULTILINE)
# Add non-verbal words randomly (e.g., "um," "you know," "like")
    non_verbal = ["um,", "you know,", "like,"]
words = script.split()
for i in range(len(words) - 1, -1, -1):
if random.random() < 0.1: # 10% chance per word
words.insert(i, random.choice(non_verbal))
script = " ".join(words)
# Basic validation
if len(script) < 10:
logger.error("Cleaned script too short (%d characters): %s", len(script), script)
return None
logger.info("Cleaned and naturalized script: %s", script)
return script
# Helper function to validate and convert speaker audio (MP3 or WAV)
async def validate_and_convert_speaker_audio(speaker_audio):
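    """Validate the speaker sample (falling back to the bundled default voice), convert MP3 or stereo input to mono WAV, and return the WAV path or None on failure."""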
if not speaker_audio or not os.path.exists(speaker_audio):
logger.warning("Speaker audio file does not exist: %s. Using default voice.", speaker_audio)
default_voice = os.path.join(os.path.dirname(__file__), "feynman.mp3")
if os.path.exists(default_voice):
speaker_audio = default_voice
else:
logger.error("Default voice not found. Cannot proceed with TTS.")
return None
try:
# Check file extension
ext = os.path.splitext(speaker_audio)[1].lower()
if ext == ".mp3":
logger.info("Converting MP3 to WAV: %s", speaker_audio)
audio = AudioSegment.from_mp3(speaker_audio)
# Convert to mono, 22050 Hz
audio = audio.set_channels(1).set_frame_rate(22050)
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir=OUTPUT_DIR) as temp_file:
audio.export(temp_file.name, format="wav")
speaker_wav = temp_file.name
elif ext == ".wav":
speaker_wav = speaker_audio
else:
logger.error("Unsupported audio format: %s", ext)
return None
# Validate WAV file
data, samplerate = sf.read(speaker_wav)
if samplerate < 16000 or samplerate > 48000:
logger.error("Invalid sample rate for %s: %d Hz", speaker_wav, samplerate)
return None
if len(data) < 16000:
logger.error("Speaker audio too short: %d frames", len(data))
return None
if data.ndim == 2:
logger.info("Converting stereo WAV to mono: %s", speaker_wav)
data = data.mean(axis=1)
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir=OUTPUT_DIR) as temp_file:
sf.write(temp_file.name, data, samplerate)
speaker_wav = temp_file.name
logger.info("Validated speaker audio: %s", speaker_wav)
return speaker_wav
except Exception as e:
logger.error("Failed to validate or convert speaker audio %s: %s", speaker_audio, str(e))
return None
# Helper function to generate audio using Coqui TTS API
def generate_xtts_audio(tts, text, speaker_wav, output_path):
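    """Synthesize text to a WAV file with the XTTS model, cloning the voice from speaker_wav."""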
if not tts:
logger.error("TTS model not initialized")
return False
try:
tts.tts_to_file(text=text, speaker_wav=speaker_wav, language="en", file_path=output_path)
logger.info("Generated audio for %s", output_path)
return True
except Exception as e:
logger.error("Failed to generate audio for %s: %s", output_path, str(e))
return False
# Helper function to extract JSON from messages
def extract_json_from_message(message):
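    """Extract a JSON payload (slides or scripts) from a TextMessage, StructuredMessage, or HandoffMessage, returning None if nothing parseable is found."""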
if isinstance(message, TextMessage):
content = message.content
logger.debug("Extracting JSON from TextMessage: %s", content)
if not isinstance(content, str):
logger.warning("TextMessage content is not a string: %s", content)
return None
# Try standard JSON block with triple backticks
pattern = r"```json\s*(.*?)\s*```"
match = re.search(pattern, content, re.DOTALL)
if match:
try:
json_str = match.group(1).strip()
logger.debug("Found JSON in code block: %s", json_str)
return json.loads(json_str)
except json.JSONDecodeError as e:
logger.error("Failed to parse JSON from code block: %s", e)
# Try to find arrays or objects
json_patterns = [
r"\[\s*\{.*?\}\s*\]", # Array of objects
r"\{\s*\".*?\"\s*:.*?\}", # Object
]
for pattern in json_patterns:
match = re.search(pattern, content, re.DOTALL)
if match:
try:
json_str = match.group(0).strip()
logger.debug("Found JSON with pattern %s: %s", pattern, json_str)
return json.loads(json_str)
except json.JSONDecodeError as e:
logger.error("Failed to parse JSON with pattern %s: %s", pattern, e)
# Try to find JSON anywhere in the content
try:
for i in range(len(content)):
for j in range(len(content), i, -1):
substring = content[i:j].strip()
if (substring.startswith('{') and substring.endswith('}')) or \
(substring.startswith('[') and substring.endswith(']')):
try:
parsed = json.loads(substring)
if isinstance(parsed, (list, dict)):
logger.info("Found JSON in substring: %s", substring)
return parsed
except json.JSONDecodeError:
continue
except Exception as e:
logger.error("Error in JSON substring search: %s", e)
logger.warning("No JSON found in TextMessage content")
return None
elif isinstance(message, StructuredMessage):
content = message.content
logger.debug("Extracting JSON from StructuredMessage: %s", content)
try:
if isinstance(content, BaseModel):
content_dict = content.dict()
return content_dict.get("slides", content_dict)
return content
except Exception as e:
logger.error("Failed to extract JSON from StructuredMessage: %s, Content: %s", e, content)
return None
elif isinstance(message, HandoffMessage):
logger.debug("Extracting JSON from HandoffMessage context")
for ctx_msg in message.context:
if hasattr(ctx_msg, "content"):
content = ctx_msg.content
logger.debug("HandoffMessage context content: %s", content)
if isinstance(content, str):
pattern = r"```json\s*(.*?)\s*```"
match = re.search(pattern, content, re.DOTALL)
if match:
try:
return json.loads(match.group(1))
except json.JSONDecodeError as e:
logger.error("Failed to parse JSON from HandoffMessage: %s", e)
json_patterns = [
r"\[\s*\{.*?\}\s*\]", # Array of objects
r"\{\s*\".*?\"\s*:.*?\}", # Object
]
for pattern in json_patterns:
match = re.search(pattern, content, re.DOTALL)
if match:
try:
return json.loads(match.group(0))
except json.JSONDecodeError as e:
logger.error("Failed to parse JSON with pattern %s: %s", pattern, e)
elif isinstance(content, dict):
return content.get("slides", content)
logger.warning("No JSON found in HandoffMessage context")
return None
logger.warning("Unsupported message type for JSON extraction: %s", type(message))
return None
# Function to generate HTML slides
def generate_html_slides(slides, title):
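    """Render the slide list as hidden HTML div elements, one per slide."""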
try:
slides_html = ""
for i, slide in enumerate(slides):
content_lines = slide['content'].replace('\n', '<br>')
slide_html = f"""
<div id="slide-{i+1}" class="slide" style="display: none; height: 100%; padding: 20px; text-align: center;">
<h1 style="margin-bottom: 10px;">{slide['title']}</h1>
<h3 style="margin-bottom: 20px; font-style: italic;">Prof. AI Feynman<br>Princeton University, April 26th, 2025</h3>
<div style="font-size: 1.2em; line-height: 1.6;">{content_lines}</div>
</div>
"""
slides_html += slide_html
logger.info(f"Generated HTML slides for: {title}")
return slides_html
except Exception as e:
logger.error(f"Failed to generate HTML slides: {str(e)}")
logger.error(traceback.format_exc())
return None
# Async function to update audio preview
async def update_audio_preview(audio_file):
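    """Return the uploaded speaker audio path so the player preview refreshes."""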
if audio_file:
logger.info("Updating audio preview for file: %s", audio_file)
return audio_file
return None
# Async function to generate lecture materials and audio
async def on_generate(api_service, api_key, serpapi_key, title, topic, instructions, lecture_type, speaker_audio, num_slides):
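    """Drive the agent swarm to research the topic, generate slides and scripts, synthesize narration audio,
    and yield HTML progress updates followed by the interactive lecture player."""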
if not serpapi_key:
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">SerpApi key required</h2>
<p style="margin-top: 20px;">Please provide a valid SerpApi key and try again.</p>
</div>
"""
return
# Initialize TTS model
tts = None
try:
device = "cuda" if torch.cuda.is_available() else "cpu"
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)
logger.info("TTS model initialized on %s", device)
except Exception as e:
logger.error("Failed to initialize TTS model: %s", str(e))
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">TTS model initialization failed</h2>
<p style="margin-top: 20px;">Error: {str(e)}</p>
<p>Please ensure the Coqui TTS model is properly installed and try again.</p>
</div>
"""
return
model_client = get_model_client(api_service, api_key)
actual_content_slides = num_slides
total_slides = actual_content_slides + 3 # Content slides + quiz, assignment, thank-you
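    # Bind the user's SerpApi key so the research agent only needs to supply a search query
    def search_web_with_key(query: str) -> str:
        """Search the web with SerpApi using the key entered in the UI."""
        return search_web(query, serpapi_key)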
research_agent = AssistantAgent(
name="research_agent",
model_client=model_client,
handoffs=["slide_agent"],
system_message="You are a Research Agent. Use the search_web tool to gather information on the topic and keywords from the initial message. Summarize the findings concisely in a single message, then use the handoff_to_slide_agent tool to pass the task to the Slide Agent. Do not produce any other output.",
        tools=[search_web_with_key]
)
slide_agent = AssistantAgent(
name="slide_agent",
model_client=model_client,
handoffs=["script_agent"],
system_message=f"""
You are a Slide Agent. Using the research from the conversation history and the specified number of content slides ({actual_content_slides}), generate exactly {actual_content_slides} content slides, plus one quiz slide, one assignment slide, and one thank-you slide, for a total of {total_slides} slides. Output ONLY a JSON array wrapped in ```json ... ``` in a TextMessage, where each slide is an object with 'title' and 'content' keys. Do not include any explanatory text, comments, or other messages. Ensure the JSON is valid and contains exactly {total_slides} slides before proceeding. After outputting the JSON, use the handoff_to_script_agent tool to pass the task to the Script Agent.
Example output for 2 content slides:
```json
[
{{"title": "Slide 1", "content": "Content for slide 1"}},
{{"title": "Slide 2", "content": "Content for slide 2"}},
{{"title": "Quiz", "content": "Quiz questions"}},
{{"title": "Assignment", "content": "Assignment details"}},
{{"title": "Thank You", "content": "Thank you message"}}
]
```""",
output_content_type=None,
reflect_on_tool_use=False
)
script_agent = AssistantAgent(
name="script_agent",
model_client=model_client,
handoffs=["feynman_agent"],
system_message=f"""
You are a Script Agent. Access the JSON array of {total_slides} slides from the conversation history. Generate a narration script (1-2 sentences) for each of the {total_slides} slides, summarizing its content in a natural, conversational tone as a speaker would, including occasional non-verbal words (e.g., "um," "you know," "like"). Output ONLY a JSON array wrapped in ```json ... ``` with exactly {total_slides} strings, one script per slide, in the same order. Ensure the JSON is valid and complete. After outputting, use the handoff_to_feynman_agent tool. If scripts cannot be generated, retry once.
Example for 3 content slides:
```json
[
"So, this slide, um, covers the main topic in a fun way.",
"The second slide introduces the key concepts.",
"This third slide shows some interesting applications.",
"Alright, you know, answer these quiz questions.",
"Here's your, like, assignment to complete.",
"Thanks for, um, attending today!"
]
```""",
output_content_type=None,
reflect_on_tool_use=False
)
feynman_agent = AssistantAgent(
name="feynman_agent",
model_client=model_client,
handoffs=[],
system_message=f"""
You are Agent Feynman. Review the slides and scripts from the conversation history to ensure coherence, completeness, and that exactly {total_slides} slides and {total_slides} scripts are received. Output a confirmation message summarizing the number of slides and scripts received. If slides or scripts are missing, invalid, or do not match the expected count ({total_slides}), report the issue clearly. Use 'TERMINATE' to signal completion.
Example: 'Received {total_slides} slides and {total_slides} scripts. Lecture is coherent. TERMINATE'
""")
swarm = Swarm(
participants=[research_agent, slide_agent, script_agent, feynman_agent],
termination_condition=HandoffTermination(target="user") | TextMentionTermination("TERMINATE")
)
progress = 0
label = "Research: in progress..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
initial_message = f"""
Lecture Title: {title}
Topic: {topic}
Additional Instructions: {instructions}
Audience: {lecture_type}
Number of Content Slides: {actual_content_slides}
Please start by researching the topic.
"""
logger.info("Starting lecture generation for topic: %s with %d content slides", topic, actual_content_slides)
slides = None
scripts = None
error_html = """
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">Failed to generate lecture materials</h2>
<p style="margin-top: 20px;">Please try again with different parameters or a different model.</p>
</div>
"""
try:
logger.info("Research Agent starting...")
task_result = await Console(swarm.run_stream(task=initial_message))
logger.info("Swarm execution completed")
slide_retry_count = 0
script_retry_count = 0
max_retries = 2
for message in task_result.messages:
source = getattr(message, 'source', getattr(message, 'sender', None))
logger.debug("Processing message from %s, type: %s", source, type(message))
if isinstance(message, HandoffMessage):
logger.info("Handoff from %s to %s", source, message.target)
if source == "research_agent" and message.target == "slide_agent":
progress = 25
label = "Slides: generating..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
elif source == "slide_agent" and message.target == "script_agent":
if slides is None:
logger.warning("Slide Agent handoff without slides JSON")
extracted_json = extract_json_from_message(message)
if extracted_json:
slides = extracted_json
logger.info("Extracted slides JSON from HandoffMessage context: %s", slides)
if slides is None or len(slides) != total_slides:
if slide_retry_count < max_retries:
slide_retry_count += 1
logger.info("Retrying slide generation (attempt %d/%d)", slide_retry_count, max_retries)
retry_message = TextMessage(
content=f"Please generate exactly {total_slides} slides ({actual_content_slides} content slides plus quiz, assignment, thank-you) as per your instructions.",
source="user",
recipient="slide_agent"
)
task_result.messages.append(retry_message)
continue
progress = 50
label = "Scripts: generating..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
elif source == "script_agent" and message.target == "feynman_agent":
if scripts is None:
logger.warning("Script Agent handoff without scripts JSON")
extracted_json = extract_json_from_message(message)
if extracted_json:
scripts = extracted_json
logger.info("Extracted scripts JSON from HandoffMessage context: %s", scripts)
progress = 75
label = "Review: in progress..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
elif source == "research_agent" and isinstance(message, TextMessage) and "handoff_to_slide_agent" in message.content:
logger.info("Research Agent completed research")
progress = 25
label = "Slides: generating..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
elif source == "slide_agent" and isinstance(message, (TextMessage, StructuredMessage)):
logger.debug("Slide Agent message received")
extracted_json = extract_json_from_message(message)
if extracted_json:
slides = extracted_json
logger.info("Slide Agent generated %d slides", len(slides))
if len(slides) != total_slides:
if slide_retry_count < max_retries:
slide_retry_count += 1
logger.info("Retrying slide generation (attempt %d/%d)", slide_retry_count, max_retries)
retry_message = TextMessage(
content=f"Please generate exactly {total_slides} slides ({actual_content_slides} content slides plus quiz, assignment, thank-you) as per your instructions.",
source="user",
recipient="slide_agent"
)
task_result.messages.append(retry_message)
continue
# Save slide content to individual files
for i, slide in enumerate(slides):
content_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_content.txt")
try:
with open(content_file, "w", encoding="utf-8") as f:
f.write(slide["content"])
logger.info("Saved slide content to %s", content_file)
except Exception as e:
logger.error("Error saving slide content to %s: %s", content_file, str(e))
progress = 50
label = "Scripts: generating..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
else:
logger.warning("No JSON extracted from slide_agent message")
if slide_retry_count < max_retries:
slide_retry_count += 1
logger.info("Retrying slide generation (attempt %d/%d)", slide_retry_count, max_retries)
retry_message = TextMessage(
content=f"Please generate exactly {total_slides} slides ({actual_content_slides} content slides plus quiz, assignment, thank-you) as per your instructions.",
source="user",
recipient="slide_agent"
)
task_result.messages.append(retry_message)
continue
elif source == "script_agent" and isinstance(message, (TextMessage, StructuredMessage)):
logger.debug("Script Agent message received")
extracted_json = extract_json_from_message(message)
if extracted_json:
scripts = extracted_json
logger.info("Script Agent generated scripts for %d slides", len(scripts))
# Save raw scripts to individual files
for i, script in enumerate(scripts):
script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_raw_script.txt")
try:
with open(script_file, "w", encoding="utf-8") as f:
f.write(script)
logger.info("Saved raw script to %s", script_file)
except Exception as e:
logger.error("Error saving raw script to %s: %s", script_file, str(e))
progress = 75
label = "Scripts generated and saved. Reviewing..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
else:
logger.warning("No JSON extracted from script_agent message")
if script_retry_count < max_retries:
script_retry_count += 1
logger.info("Retrying script generation (attempt %d/%d)", script_retry_count, max_retries)
retry_message = TextMessage(
content=f"Please generate exactly {total_slides} scripts for the {total_slides} slides as per your instructions.",
source="user",
recipient="script_agent"
)
task_result.messages.append(retry_message)
continue
elif source == "feynman_agent" and isinstance(message, TextMessage) and "TERMINATE" in message.content:
logger.info("Feynman Agent completed lecture review: %s", message.content)
progress = 90
label = "Lecture materials ready. Generating audio..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
logger.info("Slides state: %s", "Generated" if slides else "None")
logger.info("Scripts state: %s", "Generated" if scripts else "None")
if not slides or not scripts:
error_message = f"Failed to generate {'slides and scripts' if not slides and not scripts else 'slides' if not slides else 'scripts'}"
error_message += f". Received {len(slides) if slides else 0} slides and {len(scripts) if scripts else 0} scripts."
logger.error("%s", error_message)
logger.debug("Dumping all messages for debugging:")
for msg in task_result.messages:
source = getattr(msg, 'source', getattr(msg, 'sender', None))
logger.debug("Message from %s, type: %s, content: %s", source, type(msg), msg.to_text() if hasattr(msg, 'to_text') else str(msg))
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">{error_message}</h2>
<p style="margin-top: 20px;">Please try again with a different model (e.g., Anthropic-claude-3-sonnet-20240229) or simplify the topic/instructions.</p>
</div>
"""
return
if len(slides) != total_slides:
logger.error("Expected %d slides (including %d content slides + 3), but received %d", total_slides, actual_content_slides, len(slides))
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">Incorrect number of slides</h2>
<p style="margin-top: 20px;">Expected {total_slides} slides ({actual_content_slides} content slides + quiz, assignment, thank-you), but generated {len(slides)}. Please try again.</p>
</div>
"""
return
if not isinstance(scripts, list) or not all(isinstance(s, str) for s in scripts):
logger.error("Scripts are not a list of strings: %s", scripts)
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">Invalid script format</h2>
<p style="margin-top: 20px;">Scripts must be a list of strings. Please try again.</p>
</div>
"""
return
if len(scripts) != total_slides:
logger.error("Mismatch between number of slides (%d) and scripts (%d)", len(slides), len(scripts))
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">Mismatch in slides and scripts</h2>
<p style="margin-top: 20px;">Generated {len(slides)} slides but {len(scripts)} scripts. Please try again.</p>
</div>
"""
return
# Generate HTML slides
slides_html = generate_html_slides(slides, title)
if not slides_html:
logger.error("Failed to generate HTML slides")
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">Failed to generate slides</h2>
<p style="margin-top: 20px;">Please try again.</p>
</div>
"""
return
audio_files = []
validated_speaker_wav = await validate_and_convert_speaker_audio(speaker_audio)
if not validated_speaker_wav:
logger.error("Invalid speaker audio after conversion, skipping TTS")
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">Invalid speaker audio</h2>
<p style="margin-top: 20px;">Please upload a valid MP3 or WAV audio file and try again.</p>
</div>
"""
return
# Process audio generation sequentially with retries
for i, script in enumerate(scripts):
cleaned_script = clean_script_text(script)
audio_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}.wav")
script_file = os.path.join(OUTPUT_DIR, f"slide_{i+1}_script.txt")
# Save cleaned script
try:
with open(script_file, "w", encoding="utf-8") as f:
f.write(cleaned_script or "")
logger.info("Saved cleaned script to %s: %s", script_file, cleaned_script)
except Exception as e:
logger.error("Error saving cleaned script to %s: %s", script_file, str(e))
if not cleaned_script:
logger.error("Skipping audio for slide %d due to empty or invalid script", i + 1)
audio_files.append(None)
progress = 90 + ((i + 1) / len(scripts)) * 10
label = f"Generated audio for slide {i + 1}/{len(scripts)}..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
continue
max_audio_retries = 2
for attempt in range(max_audio_retries + 1):
try:
current_text = cleaned_script
if attempt > 0:
sentences = re.split(r"[.!?]+", cleaned_script)
sentences = [s.strip() for s in sentences if s.strip()][:2]
current_text = ". ".join(sentences) + "."
logger.info("Retry %d for slide %d with simplified text: %s", attempt, i + 1, current_text)
success = generate_xtts_audio(tts, current_text, validated_speaker_wav, audio_file)
if not success:
raise RuntimeError("TTS generation failed")
logger.info("Generated audio for slide %d: %s", i + 1, audio_file)
audio_files.append(audio_file)
progress = 90 + ((i + 1) / len(scripts)) * 10
label = f"Generated audio for slide {i + 1}/{len(scripts)}..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
break
except Exception as e:
logger.error("Error generating audio for slide %d (attempt %d): %s\n%s", i + 1, attempt, str(e), traceback.format_exc())
if attempt == max_audio_retries:
logger.error("Max retries reached for slide %d, skipping", i + 1)
audio_files.append(None)
progress = 90 + ((i + 1) / len(scripts)) * 10
label = f"Generated audio for slide {i + 1}/{len(scripts)}..."
yield html_with_progress(label, progress)
await asyncio.sleep(0.1)
break
slides_info = json.dumps({"slides": [
{"title": slide["title"], "content": slide["content"]}
for slide in slides
], "audioFiles": audio_files})
html_output = f"""
<div id="lecture-container" style="height: 700px; border: 1px solid #ddd; border-radius: 8px; display: flex; flex-direction: column; justify-content: space-between;">
<div id="slide-content" style="flex: 1; overflow: auto;">
{slides_html}
</div>
<div style="padding: 20px;">
<div id="progress-bar" style="width: 100%; height: 5px; background-color: #ddd; border-radius: 2px; margin-bottom: 10px;">
<div id="progress-fill" style="width: {(1/len(slides)*100)}%; height: 100%; background-color: #4CAF50; border-radius: 2px;"></div>
</div>
<div style="display: flex; justify-content: center; margin-bottom: 10px;">
<button onclick="prevSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏮</button>
<button onclick="togglePlay()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏯</button>
<button onclick="nextSlide()" style="border-radius: 50%; width: 40px; height: 40px; margin: 0 5px; font-size: 1.2em; cursor: pointer;">⏭</button>
</div>
<p id="slide-counter" style="text-align: center;">Slide 1 of {len(slides)}</p>
</div>
</div>
<script>
const lectureData = {slides_info};
let currentSlide = 0;
const totalSlides = lectureData.slides.length;
const slideCounter = document.getElementById('slide-counter');
const progressFill = document.getElementById('progress-fill');
let audioElements = [];
let currentAudio = null;
for (let i = 0; i < totalSlides; i++) {{
if (lectureData.audioFiles && lectureData.audioFiles[i]) {{
const audio = new Audio(lectureData.audioFiles[i]);
audioElements.push(audio);
}} else {{
audioElements.push(null);
}}
}}
function updateSlide() {{
for (let i = 1; i <= totalSlides; i++) {{
document.getElementById(`slide-${{i}}`).style.display = (i - 1 === currentSlide) ? 'block' : 'none';
}}
slideCounter.textContent = `Slide ${{currentSlide + 1}} of ${{totalSlides}}`;
progressFill.style.width = `${{(currentSlide + 1) / totalSlides * 100}}%`;
if (currentAudio) {{
currentAudio.pause();
currentAudio.currentTime = 0;
}}
if (audioElements[currentSlide]) {{
currentAudio = audioElements[currentSlide];
currentAudio.play().catch(e => console.error('Audio play failed:', e));
}} else {{
currentAudio = null;
}}
}}
function prevSlide() {{
if (currentSlide > 0) {{
currentSlide--;
updateSlide();
}}
}}
function nextSlide() {{
if (currentSlide < totalSlides - 1) {{
currentSlide++;
updateSlide();
}}
}}
function togglePlay() {{
if (!audioElements[currentSlide]) return;
if (currentAudio.paused) {{
currentAudio.play().catch(e => console.error('Audio play failed:', e));
}} else {{
currentAudio.pause();
}}
}}
audioElements.forEach((audio, index) => {{
if (audio) {{
audio.addEventListener('ended', () => {{
if (index < totalSlides - 1) {{
nextSlide();
}}
}});
}}
}});
// Initialize first slide
updateSlide();
</script>
"""
logger.info("Lecture generation completed successfully")
yield html_output
except Exception as e:
logger.error("Error during lecture generation: %s\n%s", str(e), traceback.format_exc())
yield f"""
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="color: #d9534f;">Error during lecture generation</h2>
<p style="margin-top: 10px; font-size: 16px;">{str(e)}</p>
<p style="margin-top: 20px;">Please try again or adjust your inputs.</p>
</div>
"""
return
# Gradio interface
with gr.Blocks(title="Agent Feynman") as demo:
gr.Markdown("# <center>Learn Anything With Professor AI Feynman</center>")
with gr.Row():
with gr.Column(scale=1):
with gr.Group():
title = gr.Textbox(label="Lecture Title", placeholder="e.g. Introduction to AI")
topic = gr.Textbox(label="Topic", placeholder="e.g. Artificial Intelligence")
instructions = gr.Textbox(label="Additional Instructions", placeholder="e.g. Focus on recent advancements")
lecture_type = gr.Dropdown(["Conference", "University", "High school"], label="Audience", value="University")
api_service = gr.Dropdown(
choices=[
"OpenAI-gpt-4o-2024-08-06",
"Anthropic-claude-3-sonnet-20240229",
"Google-gemini-1.5-flash",
"Ollama-llama3.2"
],
label="Model",
value="Google-gemini-1.5-flash"
)
api_key = gr.Textbox(label="Model Provider API Key", type="password", placeholder="Not required for Ollama")
serpapi_key = gr.Textbox(label="SerpApi Key", type="password", placeholder="Enter your SerpApi key")
num_slides = gr.Slider(1, 20, step=1, label="Number of Content Slides", value=3)
speaker_audio = gr.Audio(label="Speaker sample audio (MP3 or WAV)", type="filepath", elem_id="speaker-audio")
generate_btn = gr.Button("Generate Lecture")
with gr.Column(scale=2):
default_slide_html = """
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; height: 100%; min-height: 700px; padding: 20px; text-align: center; border: 1px solid #ddd; border-radius: 8px;">
<h2 style="font-style: italic; color: #555;">Waiting for lecture content...</h2>
<p style="margin-top: 10px; font-size: 16px;">Please Generate lecture content via the form on the left first before lecture begins</p>
</div>
"""
slide_display = gr.HTML(label="Lecture Slides", value=default_slide_html)
speaker_audio.change(
fn=update_audio_preview,
inputs=speaker_audio,
outputs=speaker_audio
)
generate_btn.click(
fn=on_generate,
inputs=[api_service, api_key, serpapi_key, title, topic, instructions, lecture_type, speaker_audio, num_slides],
outputs=[slide_display]
)
if __name__ == "__main__":
demo.launch() |