import gradio as gr
import requests
from bs4 import BeautifulSoup

# URL for the schedule page (old schedule link provided).
SCHEDULE_URL = "http://www.algoabus.co.za/port-elizabeth-bus-routes/route_list/schedule.aspx"


def fetch_schedule():
    """
    Scrape the schedule page and extract bus routes and their schedules.

    This function assumes the schedule is contained in a table with a certain
    structure. Adjust the selectors if Algoa updates their page design.
    """
    try:
        response = requests.get(SCHEDULE_URL, timeout=10)
        response.raise_for_status()
        html = response.text
        soup = BeautifulSoup(html, "html.parser")

        schedule_data = {}
        # Assume the schedule is in a table with class "scheduleTable".
        # (This is a guess; inspect the page source to confirm the actual class or id.)
        table = soup.find("table", {"class": "scheduleTable"})
        if not table:
            return None, "Could not locate the schedule table on the page."

        rows = table.find_all("tr")
        if not rows or len(rows) < 2:
            return None, "No schedule rows found in the table."

        # Assuming the first row is a header, start parsing from the second row.
        for row in rows[1:]:
            cols = row.find_all("td")
            # We assume at least two columns exist:
            # column 0: bus route name; column 1: schedule details.
            if len(cols) >= 2:
                route_name = cols[0].get_text(strip=True)
                # The schedule details may include multiple departure times separated by newlines.
                schedule_info = cols[1].get_text(separator="\n", strip=True)
                schedule_data[route_name] = schedule_info

        if not schedule_data:
            return None, "No schedule data could be extracted from the table."
        return schedule_data, None
    except Exception as e:
        return None, f"Error fetching schedule: {e}"

# Fetch schedule data at startup.
schedule_data, error = fetch_schedule()

# If scraping failed, fall back to an empty dictionary.
if error:
    print("Scraping error:", error)
    schedule_data = {}
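
# On a successful scrape, schedule_data is a plain dict keyed by route name, e.g.
# (hypothetical values, shown only to illustrate the shape):
#   {"Route A": "06:00\n07:30\n16:45", "Route B": "05:45\n17:10"}
# get_route_schedule() below simply looks the selected route up in this dict.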

def get_route_schedule(route):
    """Return the schedule details for a selected bus route."""
    if not schedule_data:
        return "Schedule data is not available at the moment."
    if route not in schedule_data:
        return "Selected route not found in the schedule data."
    # Note: gr.Textbox shows this string verbatim, so the ** markers will appear
    # literally; switch the output to gr.Markdown if rendered formatting is wanted.
    return f"**Route:** {route}\n\n**Schedule:**\n{schedule_data[route]}"


# Create the dropdown list from the scraped routes (or a placeholder if no data was retrieved).
route_choices = list(schedule_data.keys()) if schedule_data else ["No Data Available"]

iface = gr.Interface(
    fn=get_route_schedule,
    inputs=gr.Dropdown(choices=route_choices, label="Select Bus Route"),
    outputs=gr.Textbox(label="Bus Schedule"),
    title="Algoa Bus Schedule",
    description="Select a bus route to view its schedule for Algoa Bus Company in Port Elizabeth.",
)

if __name__ == "__main__":
    iface.launch()
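
# To run this app outside the Space (assuming the usual Gradio Space layout, i.e.
# this code saved as app.py), install the three imported packages and start it directly:
#   pip install gradio requests beautifulsoup4
#   python app.py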