Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| import requests | |
| from urllib.parse import urlparse, urljoin | |
| from bs4 import BeautifulSoup | |
| import asyncio | |
| # Common functions | |
def is_valid_url(url):
    """Return True if *url* has both a scheme and a network location.

    A bare domain like "example.com" is rejected: urlparse places it in
    `path`, leaving `netloc` empty.
    """
    try:
        parts = urlparse(url)
    except (ValueError, AttributeError, TypeError):
        # Malformed input (e.g. invalid IPv6 literal, non-string) — not a URL.
        # Narrowed from a bare `except:` so real bugs are not silently hidden.
        return False
    return bool(parts.scheme and parts.netloc)
async def fetch_file_content(url):
    """Fetch the body of a resource (CSS, JS, ...) without blocking the event loop.

    Runs the blocking ``requests.get`` in a worker thread via
    ``asyncio.to_thread``. Returns the response text, or a fixed placeholder
    string on any network/HTTP failure.
    """
    try:
        response = await asyncio.to_thread(requests.get, url, timeout=5)
        response.raise_for_status()
        return response.text
    except requests.exceptions.RequestException:
        # Connection error, timeout, or HTTP error status — degrade gracefully.
        # Narrowed from a bare `except:` so programming errors still surface.
        return "Failed to fetch content."
# URL to Text Converter
async def extract_additional_resources(url):
    """Collect up to five CSS, JS, and image URLs referenced by an HTML page.

    Returns a 5-tuple ``(css_links, js_links, img_links, css_content,
    js_content)``. A non-HTML response is treated as a single raw file whose
    body is placed in the css_content slot; any failure yields all-empty
    results (best-effort scraping).
    """
    try:
        response = await asyncio.to_thread(requests.get, url, timeout=5)
        response.raise_for_status()
        if 'text/html' not in response.headers.get('Content-Type', ''):
            # Not a page: hand the raw body back as the only content item.
            return [], [], [], [response.text], []
        soup = BeautifulSoup(response.text, "html.parser")

        def collect(tag_name, attr, **criteria):
            # Resolve each reference against the page URL; cap the list at 5.
            found = soup.find_all(tag_name, **criteria)
            return [urljoin(url, tag[attr]) for tag in found if attr in tag.attrs][:5]

        css_links = collect("link", "href", rel="stylesheet")
        js_links = collect("script", "src")
        img_links = collect("img", "src")
        # Download stylesheet and script bodies concurrently.
        css_content = await asyncio.gather(*(fetch_file_content(u) for u in css_links))
        js_content = await asyncio.gather(*(fetch_file_content(u) for u in js_links))
        return css_links, js_links, img_links, css_content, js_content
    except Exception:
        # Deliberate broad catch: scraping is best-effort, never fatal.
        return [], [], [], [], []
async def convert_to_text(url):
    """Download *url*, save its text to disk, and gather linked resources.

    Returns an 8-tuple: (status summary, page text, saved file path,
    css_links, js_links, img_links, css_content, js_content). On a bad URL
    or request failure the summary carries the error and the rest is empty.
    """
    empty_tail = ("", None, [], [], [], [], [])
    if not is_valid_url(url):
        return ("Error: Please enter a valid URL.",) + empty_tail
    # Mimic a desktop browser so servers that block scripts still respond.
    browser_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    try:
        response = await asyncio.to_thread(requests.get, url, headers=browser_headers, timeout=5)
        response.raise_for_status()  # surface 4xx/5xx as exceptions
    except requests.exceptions.RequestException as exc:
        return (f"Error: {exc}",) + empty_tail
    summary = (
        f"Request status: {response.status_code}\n"
        f"Content size: {len(response.text)} characters"
    )
    # Persist the page text so the UI can offer it as a download.
    file_path = "downloaded_content.txt"
    with open(file_path, "w", encoding="utf-8") as handle:
        handle.write(response.text)
    css_links, js_links, img_links, css_content, js_content = await extract_additional_resources(url)
    return (summary, response.text, file_path,
            css_links, js_links, img_links, css_content, js_content)
# Model to Text Converter
async def fetch_model_file_content(model_url, file_path):
    """Fetch a raw file from a Hugging Face or GitHub repository.

    Both hosts expose files at ``<repo_url>/raw/main/<file_path>``, so a
    single URL template covers them (the original duplicated this branch
    per host).  Returns the file text, or an "Error: ..." string on an
    unsupported host or a request failure.

    NOTE(review): the branch is hard-coded to "main"; repositories whose
    default branch differs (e.g. "master") will 404 — confirm acceptable.
    """
    if "huggingface.co" not in model_url and "github.com" not in model_url:
        return "Error: Unsupported repository."
    # Normalize slashes so a trailing "/" on the repo URL (or a leading "/"
    # on the file path) does not produce a malformed "...//raw/main/..." URL.
    full_url = f"{model_url.rstrip('/')}/raw/main/{file_path.lstrip('/')}"
    try:
        response = await asyncio.to_thread(requests.get, full_url, timeout=5)
        response.raise_for_status()
        return response.text
    except Exception as e:
        return f"Error: {e}"
# HTML/JS injected once into the page: defines copyCode(textareaId), which
# copies the value of the <textarea> inside the Gradio component whose
# elem_id matches, using the async Clipboard API (requires a secure context).
copy_button_html = """
<script>
function copyCode(textareaId) {
    const text = document.querySelector(`#${textareaId} textarea`).value;
    navigator.clipboard.writeText(text).then(() => {
        alert("Text copied to clipboard!");
    }).catch(() => {
        alert("Failed to copy text.");
    });
}
</script>
"""
# Path to the external stylesheet loaded by Gradio.
css = "app.css"

# Create the Gradio interface.
with gr.Blocks(css=css) as demo:
    gr.HTML(copy_button_html)  # inject the copyCode() helper once, page-wide

    with gr.Tabs():
        # Tab 1: URL to Text Converter
        with gr.Tab("URL to Text Converter"):
            gr.Markdown("## URL to Text Converter")
            gr.Markdown("Enter a URL to fetch its text content and download it as a .txt file.")
            with gr.Row():
                url_input = gr.Textbox(label="Enter URL", placeholder="https://example.com")
            with gr.Row():
                results_output = gr.Textbox(label="Request Results", interactive=False)
                text_output = gr.Textbox(label="Text Content", interactive=True, elem_id="output-text")
            with gr.Row():
                # Client-side copy button; targets the textarea by elem_id.
                gr.HTML("<button onclick='copyCode(\"output-text\")'>Copy Code</button>")
            file_output = gr.File(label="Download File", visible=False)  # hidden download slot
            submit_button = gr.Button("Fetch Content")

            # Additional resources live in a collapsible section.
            with gr.Accordion("Show/Hide Additional Resources", open=False):
                gr.Markdown("### CSS Files")
                css_output = gr.Textbox(label="CSS Files", interactive=False)
                gr.Markdown("### JS Files")
                js_output = gr.Textbox(label="JS Files", interactive=False)
                gr.Markdown("### Images")
                img_output = gr.Textbox(label="Images", interactive=False)
                gr.Markdown("### CSS Content")
                css_content_output = gr.Textbox(label="CSS Content", interactive=True)
                gr.Markdown("### JS Content")
                js_content_output = gr.Textbox(label="JS Content", interactive=True)

            # BUG FIX: the original passed five freshly constructed, never-
            # rendered Textboxes as outputs, so the Accordion fields above were
            # never updated. Bind the click to the rendered components instead.
            submit_button.click(
                fn=convert_to_text,
                inputs=url_input,
                outputs=[
                    results_output, text_output, file_output,
                    css_output, js_output, img_output,
                    css_content_output, js_content_output,
                ],
            )

        # Tab 2: Model to Text Converter
        with gr.Tab("Model to Text Converter"):
            gr.Markdown("## Model to Text Converter")
            gr.Markdown("Enter a link to a model on Hugging Face or GitHub, and specify the file path.")
            with gr.Row():
                model_url_input = gr.Textbox(label="Model URL", placeholder="https://huggingface.co/... or https://github.com/...")
                file_path_input = gr.Textbox(label="File Path", placeholder="e.g., config.json or README.md")
            with gr.Row():
                model_content_output = gr.Textbox(label="File Content", interactive=True, elem_id="model-content-output")
            with gr.Row():
                # Client-side copy button for the fetched file content.
                gr.HTML("<button onclick='copyCode(\"model-content-output\")'>Copy Code</button>")
            submit_model_button = gr.Button("Fetch File Content")
            submit_model_button.click(
                fn=fetch_model_file_content,
                inputs=[model_url_input, file_path_input],
                outputs=[model_content_output],
            )

# Launch the interface
demo.launch()