Nymbo committed
Commit 0c8e311 · verified · 1 Parent(s): ee92c7f

Update app.py

Files changed (1)
  1. app.py +55 -56
app.py CHANGED
@@ -1,12 +1,11 @@
 # Purpose: One Space that offers up to seven tools/tabs (all exposed as MCP tools):
 # 1) Fetch — convert webpages to clean Markdown format
-# 2) DuckDuckGo Search — compact JSONL search output (short keys to minimize tokens)
-# 3) Python Code Executor — run Python code and capture stdout/errors
-# 4) Kokoro TTS — synthesize speech from text using Kokoro-82M with 54 voice options
+# 2) Web Search — compact JSONL search output (DuckDuckGo backend)
+# 3) Code Interpreter — execute Python code and capture stdout/errors
+# 4) Generate Speech — synthesize speech from text using Kokoro-82M with 54 voice options
 # 5) Memory Manager — lightweight JSON-based local memory store
-# 6) Image Generation - HF serverless inference providers (requires HF_READ_TOKEN)
-# 7) Video Generation - HF serverless inference providers (requires HF_READ_TOKEN)
-# 8) Deep Research
+# 6) Generate Image - HF serverless inference providers (requires HF_READ_TOKEN)
+# 7) Generate Video - HF serverless inference providers (requires HF_READ_TOKEN)
 
 from __future__ import annotations
 
@@ -349,7 +348,7 @@ def _truncate_markdown(markdown: str, max_chars: int) -> Tuple[str, Dict[str, an
     return truncated + truncation_notice, metadata
 
 
-def Fetch_Webpage(  # <-- MCP tool #1 (Fetch)
+def Web_Fetch(  # <-- MCP tool #1 (Fetch)
     url: Annotated[str, "The absolute URL to fetch (must return HTML)."],
     max_chars: Annotated[int, "Maximum characters to return (0 = no limit, full page content)."] = 3000,
     strip_selectors: Annotated[str, "CSS selectors to remove (comma-separated, e.g., '.header, .footer, nav')."] = "",
@@ -375,10 +374,10 @@ def Fetch_Webpage(  # <-- MCP tool #1 (Fetch)
     depending on the url_scraper setting. Content is length-limited by max_chars
     and includes detailed truncation metadata when content is truncated.
     """
-    _log_call_start("Fetch_Webpage", url=url, max_chars=max_chars, strip_selectors=strip_selectors, url_scraper=url_scraper, offset=offset)
+    _log_call_start("Web_Fetch", url=url, max_chars=max_chars, strip_selectors=strip_selectors, url_scraper=url_scraper, offset=offset)
     if not url or not url.strip():
         result = "Please enter a valid URL."
-        _log_call_end("Fetch_Webpage", _truncate_for_log(result))
+        _log_call_end("Web_Fetch", _truncate_for_log(result))
         return result
 
     try:
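Note: `_log_call_start` and `_log_call_end` are helpers defined elsewhere in app.py; each early return in this function logs before exiting so no call is left unclosed. A minimal sketch of what such paired helpers might look like, assuming only the standard library (the bodies below are illustrative, not the file's actual implementation):

```python
import logging

logger = logging.getLogger("tools-mcp")

def _log_call_start(tool: str, **kwargs) -> None:
    # One line per invocation with the (possibly truncated) arguments.
    args = ", ".join(f"{k}={v!r}" for k, v in kwargs.items())
    logger.info("[%s] start: %s", tool, args)

def _log_call_end(tool: str, summary: str) -> None:
    # Paired with _log_call_start; summary is a short result description.
    logger.info("[%s] end: %s", tool, summary)
```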
@@ -386,14 +385,14 @@ def Fetch_Webpage(  # <-- MCP tool #1 (Fetch)
         resp.raise_for_status()
     except requests.exceptions.RequestException as e:
         result = f"An error occurred: {e}"
-        _log_call_end("Fetch_Webpage", _truncate_for_log(result))
+        _log_call_end("Web_Fetch", _truncate_for_log(result))
         return result
 
     final_url = str(resp.url)
     ctype = resp.headers.get("Content-Type", "")
     if "html" not in ctype.lower():
         result = f"Unsupported content type for extraction: {ctype or 'unknown'}"
-        _log_call_end("Fetch_Webpage", _truncate_for_log(result))
+        _log_call_end("Web_Fetch", _truncate_for_log(result))
         return result
 
     # Decode to text
@@ -419,7 +418,7 @@ def Fetch_Webpage(  # <-- MCP tool #1 (Fetch)
     if offset > 0:
         if offset >= len(full_result):
             result = f"Offset {offset} exceeds content length ({len(full_result)} characters). Content ends at position {len(full_result)}."
-            _log_call_end("Fetch_Webpage", _truncate_for_log(result))
+            _log_call_end("Web_Fetch", _truncate_for_log(result))
             return result
         result = full_result[offset:]
     else:
@@ -433,12 +432,12 @@ def Fetch_Webpage(  # <-- MCP tool #1 (Fetch)
         metadata["total_chars_estimate"] = len(full_result)
         metadata["next_cursor"] = offset + metadata["next_cursor"] if metadata["next_cursor"] else None
 
-    _log_call_end("Fetch_Webpage", f"chars={len(result)}, url_scraper={url_scraper}, offset={offset}")
+    _log_call_end("Web_Fetch", f"chars={len(result)}, url_scraper={url_scraper}, offset={offset}")
    return result
 
 
 # ============================================
-# DuckDuckGo Search: Enhanced with error handling & rate limiting
+# Web Search (DuckDuckGo backend): Enhanced with error handling & rate limiting
 # ============================================
 
 import asyncio
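Note: the `next_cursor` metadata above supports cursor-style pagination: a client reads the cursor from a truncated response and passes it back as `offset`. A rough client-side sketch, where `call_tool` is a hypothetical stand-in for however an MCP client invokes `Web_Fetch`, and the stop condition is deliberately simplistic:

```python
# Hypothetical client-side pagination loop over Web_Fetch-style output.
def fetch_all(call_tool, url: str, page_size: int = 3000) -> str:
    """Accumulate a long page by repeatedly advancing the offset cursor."""
    chunks: list[str] = []
    offset = 0
    while True:
        chunk = call_tool("Web_Fetch", url=url, max_chars=page_size, offset=offset)
        chunks.append(chunk)
        # Stop once the server returns fewer characters than requested,
        # i.e. there is no further content to page through.
        if len(chunk) < page_size:
            break
        offset += page_size
    return "".join(chunks)
```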
@@ -527,7 +526,7 @@ class SlowHost(Exception):
 def _fetch_page_markdown_fast(url: str, max_chars: int = 3000, timeout: float = 10.0) -> str:
     """Fetch a single URL quickly; raise SlowHost on timeout.
 
-    Uses a shorter HTTP timeout to detect slow hosts, then reuses Fetch_Webpage
+    Uses a shorter HTTP timeout to detect slow hosts, then reuses Web_Fetch
     logic for conversion to Markdown. Returns empty string on non-timeout errors.
     """
     try:
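Note: this fast path trades completeness for latency: a short timeout turns a slow origin into a `SlowHost` exception the caller can catch and skip. A compact sketch of the pattern, reusing the `SlowHost` class named above (the rest is illustrative):

```python
import requests

class SlowHost(Exception):
    """Raised when an origin exceeds the fast-path timeout."""

def fetch_fast(url: str, timeout: float = 10.0) -> str:
    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
    except requests.exceptions.Timeout as exc:
        raise SlowHost(url) from exc   # caller may skip this host
    except requests.exceptions.RequestException:
        return ""                      # non-timeout errors: empty result
    return resp.text
```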
@@ -545,7 +544,7 @@ def _fetch_page_markdown_fast(url: str, max_chars: int = 3000, timeout: float =
     if "html" not in ctype.lower():
         return ""
 
-    # Decode to text and convert similar to Fetch_Webpage (lean path)
+    # Decode to text and convert similar to Web_Fetch (lean path)
     resp.encoding = resp.encoding or resp.apparent_encoding
     html = resp.text
     soup = BeautifulSoup(html, "lxml")
@@ -672,7 +671,7 @@ def _format_search_result(result: dict, search_type: str, index: int) -> list[st
     return lines
 
 
-def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
+def Web_Search(  # <-- MCP tool #2 (Web Search)
     query: Annotated[str, "The search query (supports operators like site:, quotes, OR)."],
     max_results: Annotated[int, "Number of results to return (1–20)."] = 5,
     page: Annotated[int, "Page number for pagination (1-based, each page contains max_results items)."] = 1,
@@ -680,7 +679,7 @@ def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
     offset: Annotated[int, "Result offset to start from (overrides page if > 0, for precise continuation)."] = 0,
 ) -> str:
     """
-    Run a DuckDuckGo search and return formatted results with support for multiple content types.
+    Run a web search (DuckDuckGo backend) and return formatted results with support for multiple content types.
 
     Features smart fallback: if 'news' search returns no results, automatically retries with 'text'
     search to catch sources like Hacker News that might not appear in news-specific results.
@@ -709,10 +708,10 @@ def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
     If 'news' search fails, results include a note about automatic fallback to 'text' search.
     Includes next_offset information for easy continuation.
     """
-    _log_call_start("Search_DuckDuckGo", query=query, max_results=max_results, page=page, search_type=search_type, offset=offset)
+    _log_call_start("Web_Search", query=query, max_results=max_results, page=page, search_type=search_type, offset=offset)
     if not query or not query.strip():
         result = "No search query provided. Please enter a search term."
-        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        _log_call_end("Web_Search", _truncate_for_log(result))
         return result
 
     # Validate parameters
@@ -784,7 +783,7 @@ def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
         raw = _perform_search(search_type)
     except Exception as e:
         result = f"Error: {str(e)}"
-        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        _log_call_end("Web_Search", _truncate_for_log(result))
         return result
 
     # Smart fallback: if news search returns empty and we haven't tried text yet, try text search
@@ -801,7 +800,7 @@ def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
     if not raw:
         fallback_note = " (also tried 'text' search as fallback)" if original_search_type == "news" and used_fallback else ""
         result = f"No {original_search_type} results found for query: {query}{fallback_note}"
-        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        _log_call_end("Web_Search", _truncate_for_log(result))
         return result
 
     # Apply pagination by slicing the results
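Note: the fallback and pagination flow around these log calls condenses to the sketch below; `_perform_search` stands in for the inner helper visible earlier in this diff, and the return shape is simplified for illustration:

```python
def search_with_fallback(query: str, search_type: str, offset: int, max_results: int):
    raw = _perform_search(search_type)        # DDG query for the requested type
    used_fallback = False
    if not raw and search_type == "news":
        raw = _perform_search("text")         # retry as a plain text search
        used_fallback = True
    if not raw:
        return None, used_fallback            # caller formats the "no results" message
    # Pagination is a plain slice over the fetched result list.
    return raw[offset:offset + max_results], used_fallback
```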
@@ -812,7 +811,7 @@ def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
            result = f"Offset {actual_offset} exceeds available results ({len(raw)} total). Try offset=0 to start from beginning."
         else:
            result = f"No {original_search_type} results found on page {calculated_page} for query: {query}. Try page 1 or reduce page number."
        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
-        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        _log_call_end("Web_Search", _truncate_for_log(result))
        return result
 
     # Format results based on search type
@@ -854,7 +853,7 @@ def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
     search_info = f"type={original_search_type}"
     if used_fallback:
         search_info += "→text"
-    _log_call_end("Search_DuckDuckGo", f"{search_info} page={calculated_page} offset={actual_offset} results={len(paginated_results)} chars={len(result)}")
+    _log_call_end("Web_Search", f"{search_info} page={calculated_page} offset={actual_offset} results={len(paginated_results)} chars={len(result)}")
     return result
 
 
@@ -862,7 +861,7 @@ def Search_DuckDuckGo(  # <-- MCP tool #2 (DDG Search)
 # Code Execution: Python (MCP tool #3)
 # ======================================
 
-def Execute_Python(code: Annotated[str, "Python source code to run; stdout is captured and returned."]) -> str:
+def Code_Interpreter(code: Annotated[str, "Python source code to run; stdout is captured and returned."]) -> str:
     """
     Execute arbitrary Python code and return captured stdout or an error message.
 
@@ -873,10 +872,10 @@ def Execute_Python(code: Annotated[str, "Python source code to run; stdout is ca
         str: Combined stdout produced by the code, or the exception text if
         execution failed.
     """
-    _log_call_start("Execute_Python", code=_truncate_for_log(code or "", 300))
+    _log_call_start("Code_Interpreter", code=_truncate_for_log(code or "", 300))
     if code is None:
         result = "No code provided."
-        _log_call_end("Execute_Python", result)
+        _log_call_end("Code_Interpreter", result)
         return result
 
     old_stdout = sys.stdout
@@ -888,12 +887,12 @@ def Execute_Python(code: Annotated[str, "Python source code to run; stdout is ca
         result = str(e)
     finally:
         sys.stdout = old_stdout
-    _log_call_end("Execute_Python", _truncate_for_log(result))
+    _log_call_end("Code_Interpreter", _truncate_for_log(result))
     return result
 
 
 # ==========================
-# Kokoro TTS (MCP tool #4)
+# Generate Speech (MCP tool #4)
 # ==========================
 
 _KOKORO_STATE = {
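Note: `Code_Interpreter` (two hunks above) captures output by swapping `sys.stdout` for a buffer and restoring it in a `finally` block. The standard-library equivalent, shown as an alternative sketch rather than the file's code, is `contextlib.redirect_stdout`:

```python
import contextlib
import io

def run_and_capture(code: str) -> str:
    """Execute code and return whatever it printed, or the error text."""
    buf = io.StringIO()
    try:
        with contextlib.redirect_stdout(buf):  # restores sys.stdout automatically
            exec(code, {})                     # fresh globals for each run
    except Exception as e:
        return str(e)
    return buf.getvalue()

print(run_and_capture("print(2 + 2)"))         # -> "4"
```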
@@ -1451,7 +1450,7 @@ def _mem_delete(
 
 # --- Fetch tab (compact controllable extraction) ---
 fetch_interface = gr.Interface(
-    fn=Fetch_Webpage,
+    fn=Web_Fetch,
     inputs=[
         gr.Textbox(label="URL", placeholder="https://example.com/article"),
         gr.Slider(
@@ -1483,7 +1482,7 @@ fetch_interface = gr.Interface(
         ),
     ],
     outputs=gr.Markdown(label="Extracted Content"),
-    title="Fetch Webpage",
+    title="Web Fetch",
     description=(
         "<div style=\"text-align:center\">Convert any webpage to clean Markdown format with precision controls, or extract all links. Supports custom element removal, length limits, and pagination with offset.</div>"
     ),
@@ -1500,9 +1499,9 @@ fetch_interface = gr.Interface(
     flagging_mode="never",
 )
 
-# --- Simplified DDG tab (readable output only) ---
+# --- Web Search tab (readable output only) ---
 concise_interface = gr.Interface(
-    fn=Search_DuckDuckGo,
+    fn=Web_Search,
     inputs=[
         gr.Textbox(label="Query", placeholder="topic OR site:example.com"),
         gr.Slider(minimum=1, maximum=20, value=5, step=1, label="Max results"),
@@ -1523,12 +1522,12 @@ concise_interface = gr.Interface(
         ),
     ],
     outputs=gr.Textbox(label="Search Results", interactive=False),
-    title="DuckDuckGo Search",
+    title="Web Search",
     description=(
         "<div style=\"text-align:center\">Multi-type web search with readable output format, date detection, and flexible pagination. Supports text, news, images, videos, and books. Features smart fallback for news searches and precise offset control.</div>"
     ),
     api_description=(
-        "Run a DuckDuckGo search with support for multiple content types and return formatted results. "
+        "Run a web search (DuckDuckGo backend) with support for multiple content types and return formatted results. "
         "Features smart fallback: if 'news' search returns no results, automatically retries with 'text' search "
         "to catch sources like Hacker News that might not appear in news-specific results. "
         "Supports advanced search operators: site: for specific domains, quotes for exact phrases, "
@@ -1545,12 +1544,12 @@ concise_interface = gr.Interface(
 
 ##
 
-# --- Execute Python tab (simple code interpreter) ---
+# --- Code Interpreter tab (Python) ---
 code_interface = gr.Interface(
-    fn=Execute_Python,
+    fn=Code_Interpreter,
     inputs=gr.Code(label="Python Code", language="python"),
     outputs=gr.Textbox(label="Output"),
-    title="Python Code Executor",
+    title="Code Interpreter",
     description=(
         "<div style=\"text-align:center\">Execute Python code and see the output.</div>"
     ),
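Note: each `gr.Interface` doubles as an MCP tool, which is why this commit renames the `fn` target and the `title` together; the wrapped function's name is what clients see as the tool name. A minimal self-contained sketch of that pattern, assuming a Gradio version with MCP support (the `Echo_Text` tool is invented for illustration):

```python
import gradio as gr

def Echo_Text(text: str) -> str:
    """Return the input unchanged; the function name becomes the MCP tool name."""
    return text

demo = gr.Interface(
    fn=Echo_Text,
    inputs=gr.Textbox(label="Text"),
    outputs=gr.Textbox(label="Output"),
    title="Echo Text",
)

if __name__ == "__main__":
    demo.launch(mcp_server=True)  # expose the interface as an MCP server
```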
@@ -1575,7 +1574,7 @@ CSS_STYLES = """
 /* Place bold tools list on line 2, normal auth note on line 3 (below title) */
 .app-title::before {
     grid-row: 2;
-    content: "Fetch Webpage | Search DuckDuckGo | Python Interpreter | Memory Manager | Kokoro TTS | Image Generation | Video Generation | Deep Research";
+    content: "Web Fetch | Web Search | Code Interpreter | Memory Manager | Generate Speech | Generate Image | Generate Video | Deep Research";
     display: block;
     font-size: 1rem;
     font-weight: 700;
@@ -1753,7 +1752,7 @@ CSS_STYLES = """
 }
 """
 
-# --- Kokoro TTS tab (text to speech) ---
+# --- Generate Speech tab (text to speech) ---
 available_voices = get_kokoro_voices()
 kokoro_interface = gr.Interface(
     fn=Generate_Speech,
@@ -1768,7 +1767,7 @@ kokoro_interface = gr.Interface(
         ),
     ],
     outputs=gr.Audio(label="Audio", type="numpy", format="wav", show_download_button=True),
-    title="Kokoro TTS",
+    title="Generate Speech",
     description=(
         "<div style=\"text-align:center\">Generate speech with Kokoro-82M. Supports multiple languages and accents. Runs on CPU or CUDA if available.</div>"
     ),
@@ -1893,7 +1892,7 @@ memory_interface = gr.Interface(
 )
 
 # ==========================
-# Image Generation (Serverless)
+# Generate Image (Serverless)
 # ==========================
 
 HF_API_TOKEN = os.getenv("HF_READ_TOKEN")
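Note: the section renamed here drives the Generate Image tab through Hugging Face serverless inference, authenticated with `HF_READ_TOKEN`. A standalone sketch using huggingface_hub's `InferenceClient`; the full repo id for the tab's stated default model is an assumption:

```python
import os
from huggingface_hub import InferenceClient

client = InferenceClient(token=os.getenv("HF_READ_TOKEN"))

# text_to_image returns a PIL.Image.Image on success.
image = client.text_to_image(
    "a lighthouse at dusk, watercolor",
    model="black-forest-labs/FLUX.1-Krea-dev",  # assumed repo path for the default
)
image.save("out.png")
```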
@@ -2005,7 +2004,7 @@ image_generation_interface = gr.Interface(
         gr.Slider(minimum=64, maximum=1216, value=1024, step=32, label="Height"),
     ],
     outputs=gr.Image(label="Generated Image"),
-    title="Image Generation",
+    title="Generate Image",
     description=(
         "<div style=\"text-align:center\">Generate images via Hugging Face serverless inference. "
         "Default model is FLUX.1-Krea-dev.</div>"
@@ -2024,7 +2023,7 @@ image_generation_interface = gr.Interface(
 )
 
 # ==========================
-# Video Generation (Serverless)
+# Generate Video (Serverless)
 # ==========================
 
 def _write_video_tmp(data_iter_or_bytes: object, suffix: str = ".mp4") -> str:
@@ -2201,7 +2200,7 @@ video_generation_interface = gr.Interface(
         gr.Slider(minimum=1.0, maximum=10.0, value=4.0, step=0.5, label="Duration (s)"),
     ],
     outputs=gr.Video(label="Generated Video", show_download_button=True, format="mp4"),
-    title="Video Generation",
+    title="Generate Video",
     description=(
         "<div style=\"text-align:center\">Generate short videos via Hugging Face serverless inference. "
         "Default model is Wan2.2-T2V-A14B.</div>"
@@ -2279,13 +2278,13 @@ def _search_urls_only(query: str, max_results: int) -> list[str]:
 
 
 def _fetch_page_markdown(url: str, max_chars: int = 3000) -> str:
-    """Fetch a single URL and return cleaned Markdown using existing Fetch_Webpage.
+    """Fetch a single URL and return cleaned Markdown using existing Web_Fetch.
 
     Returns empty string on error.
     """
     try:
         # Intentionally skip global fetch rate limiting for Deep Research speed.
-        return Fetch_Webpage(url=url, max_chars=max_chars, strip_selectors="", url_scraper=False, offset=0)  # type: ignore[misc]
+        return Web_Fetch(url=url, max_chars=max_chars, strip_selectors="", url_scraper=False, offset=0)  # type: ignore[misc]
     except Exception:
         return ""
 
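Note: skipping the global rate limiter matters because Deep Research fans out across many URLs at once. A plausible sketch of that fan-out with a thread pool; only `_fetch_page_markdown` comes from this diff, the concurrency details are assumptions:

```python
from concurrent.futures import ThreadPoolExecutor

def fetch_many(urls: list[str], max_chars: int = 3000) -> dict[str, str]:
    """Fetch several pages concurrently; failed fetches come back as ''."""
    with ThreadPoolExecutor(max_workers=8) as pool:
        results = pool.map(lambda u: _fetch_page_markdown(u, max_chars), urls)
    return {url: md for url, md in zip(urls, results) if md}
```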
@@ -2753,13 +2752,13 @@ _interfaces = [
     deep_research_interface,
 ]
 _tab_names = [
-    "Fetch Webpage",
-    "DuckDuckGo Search",
-    "Python Code Executor",
+    "Web Fetch",
+    "Web Search",
+    "Code Interpreter",
     "Memory Manager",
-    "Kokoro TTS",
-    "Image Generation",
-    "Video Generation",
+    "Generate Speech",
+    "Generate Image",
+    "Generate Video",
     "Deep Research",
 ]
 
@@ -2831,10 +2830,10 @@ with gr.Blocks(title="Nymbo/Tools MCP", theme="Nymbo/Nymbo_Theme", css=CSS_STYLE
     <div class="info-card__body">
         <h3>Tool Notes &amp; Kokoro Voice Legend</h3>
         <p>
-            No authentication required for: <code>Fetch_Webpage</code>, <code>Search_DuckDuckGo</code>,
-            <code>Execute_Python</code>, and <code>Generate_Speech</code>.
+            No authentication required for: <code>Web_Fetch</code>, <code>Web_Search</code>,
+            <code>Code_Interpreter</code>, and <code>Generate_Speech</code>.
         </p>
-        <p><strong>Kokoro TTS voice prefixes</strong></p>
+        <p><strong>Kokoro voice prefixes</strong></p>
         <ul class="info-list" style="display:grid;grid-template-columns:repeat(2,minmax(160px,1fr));gap:6px 16px;">
             <li><code>af</code> — American female</li>
             <li><code>am</code> — American male</li>
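Note: the two-letter prefixes encode accent and gender, so voices can be grouped without a per-voice lookup table. A small sketch of decoding a voice id by prefix, covering only the two legend entries visible in this hunk:

```python
# Prefix legend from the info card; only the entries shown in this diff.
VOICE_PREFIXES = {
    "af": "American female",
    "am": "American male",
}

def describe_voice(voice_id: str) -> str:
    """Map e.g. 'af_heart' -> 'American female' via its two-letter prefix."""
    prefix = voice_id.split("_", 1)[0]
    return VOICE_PREFIXES.get(prefix, "unknown prefix")
```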
 