""" gradio.py Functions for handling Gradio UI interactions and processing user inputs. """ import logging import shutil from pathlib import Path from functions.linkedin_resume import extract_text_from_linkedin_pdf from functions.github import get_github_repositories from functions.job_call import summarize_job_call from functions.writer_agent import write_resume from configuration import DEFAULT_GITHUB_PROFILE # pylint: disable=broad-exception-caught # Set up logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) class MockFile: """Mock file object that mimics uploaded file interface with just a file path.""" def __init__(self, path): self.name = path def check_default_linkedin_pdf(): """Check if default LinkedIn PDF exists in data directory.""" # Get the project root directory (parent of functions directory) project_root = Path(__file__).parent.parent default_pdf = f'{project_root}/data/linkedin_profile.pdf' if not Path(default_pdf).exists(): logger.warning("Default LinkedIn PDF not found at %s", default_pdf) return False, None return True, default_pdf def process_with_default_option( use_default_pdf, linkedin_pdf, github_profile, job_post, user_instructions ): """Process inputs with consideration for default PDF option.""" has_default, default_path = check_default_linkedin_pdf() # Determine which PDF file to use pdf_file = None if use_default_pdf and has_default: pdf_file = MockFile(default_path) elif linkedin_pdf is not None: pdf_file = linkedin_pdf return process_inputs(pdf_file, github_profile, job_post, user_instructions) def process_inputs(linkedin_pdf, github_url, job_post_text, user_instructions): """ Process the input files and URLs from the Gradio interface. Args: linkedin_pdf: Uploaded LinkedIn resume export PDF file or mock file object with path github_url (str): GitHub profile URL job_post_text (str): Job post text content user_instructions (str): Additional instructions from the user Returns: str: Formatted output with file and URL information """ result = "" extraction_result = None logger.info("Processing user inputs from Gradio interface") # Process LinkedIn PDF file if linkedin_pdf is not None: # Handle both file objects and mock file objects with path strings file_path = linkedin_pdf.name file_display_name = Path(file_path).name result += "✅ LinkedIn Resume PDF provided\n" logger.info("Processing LinkedIn PDF: %s", file_display_name) # Save uploaded file as new default (only if it's not already the default) project_root = Path(__file__).parent.parent default_pdf_path = project_root / "data" / "linkedin_profile.pdf" # Check if this is an uploaded file (not the default file) if not isinstance(linkedin_pdf, MockFile): try: # Create data directory if it doesn't exist default_pdf_path.parent.mkdir(exist_ok=True) # Copy uploaded file to default location shutil.copy2(file_path, default_pdf_path) result += " ✅ Saved as new default LinkedIn profile\n" logger.info("Saved uploaded LinkedIn PDF as new default: %s", default_pdf_path) except Exception as save_error: result += f" ⚠️ Could not save as default: {str(save_error)}\n" logger.warning("Failed to save LinkedIn PDF as default: %s", str(save_error)) # Extract and structure text from the PDF extraction_result = extract_text_from_linkedin_pdf(file_path) if extraction_result["status"] == "success": result += " ✅ Text extraction successful\n\n" logger.info("LinkedIn PDF text extraction successful") elif extraction_result["status"] == "warning": result += f" ⚠️ Text extraction: 
{extraction_result['message']}\n\n" logger.warning("LinkedIn PDF extraction warning: %s", extraction_result['message']) else: result += f" ❌ Text extraction failed: {extraction_result['message']}\n\n" logger.error("LinkedIn PDF extraction failed: %s", extraction_result['message']) else: result += "❌ No LinkedIn resume PDF file uploaded\n\n" logger.info("No LinkedIn PDF file provided") # Process GitHub profile # Use default GitHub profile if none provided if github_url and github_url.strip(): github_url_to_use = github_url.strip() else: github_url_to_use = DEFAULT_GITHUB_PROFILE if github_url_to_use: if github_url and github_url.strip(): result += "✅ GitHub Profile URL provided\n" else: result += "✅ Using default GitHub Profile URL\n" logger.info("Processing GitHub URL: %s", github_url_to_use) # Retrieve repositories from GitHub github_result = get_github_repositories(github_url_to_use) if github_result["status"] == "success": result += " ✅ GitHub list download successful\n\n" logger.info( "GitHub repositories retrieved successfully for %s", github_result['metadata']['username'] ) else: result += f" ❌ GitHub extraction failed: {github_result['message']}\n\n" logger.error("GitHub extraction failed: %s", github_result['message']) else: result += "❌ No GitHub profile URL provided\n\n" logger.info("No GitHub URL provided") # Process job post text if job_post_text and job_post_text.strip(): result += "✅ Job post text provided\n" logger.info("Job post text provided (%d characters)", len(job_post_text)) summary = summarize_job_call(job_post_text) result += summary result += " ✅ Job post summary generated\n" logger.info("Job post summary generated (%d characters)", len(summary)) else: result += "❌ Job post not provided\n" logger.info("No job post text provided") # Process user instructions if user_instructions and user_instructions.strip(): result += "✅ Additional instructions provided\n" logger.info("User instructions provided (%d characters)", len(user_instructions)) else: result += "ℹ️ No additional instructions provided\n" logger.info("No additional instructions provided") logger.info("Input processing completed") # Generate resume only if we have valid extraction result if extraction_result and extraction_result.get("status") == "success": try: _ = write_resume(extraction_result, user_instructions) result += "\n✅ Resume generated successfully\n" logger.info("Resume generation completed successfully") except Exception as e: result += f"\n❌ Resume generation failed: {str(e)}\n" logger.error("Resume generation failed: %s", str(e)) else: result += "\n❌ Cannot generate resume: No valid LinkedIn data extracted\n" result += "Please ensure you upload a valid LinkedIn PDF export file.\n" logger.warning("Resume generation skipped - no valid LinkedIn data available") return result def get_processed_data(linkedin_pdf, github_url, job_post_text, instructions): """ Get structured data from all inputs for further processing. 
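

# NOTE: process_inputs() above and get_processed_data() below consume result
# dictionaries produced by functions.linkedin_resume and functions.github. The
# exact keys are defined in those modules (not shown here); judging from how the
# results are read, the assumed shape is roughly:
#
#     {
#         "status": "success" | "warning" | "error",  # "warning" only from the PDF extractor
#         "message": "...",                           # human-readable detail on non-success
#         "metadata": {"username": "..."},            # GitHub results include the parsed username
#     }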


def get_processed_data(linkedin_pdf, github_url, job_post_text, instructions):
    """
    Get structured data from all inputs for further processing.

    Args:
        linkedin_pdf: Uploaded LinkedIn resume export PDF file
        github_url (str): GitHub profile URL
        job_post_text (str): Job post text content
        instructions (str): Additional instructions from the user

    Returns:
        dict: Structured data containing all processed information
    """

    job_post_text = job_post_text.strip() if job_post_text and job_post_text.strip() else None
    instructions = instructions.strip() if instructions and instructions.strip() else None

    processed_data = {
        "linkedin": None,
        "github": None,
        "job_post": job_post_text,
        "user_instructions": instructions,
        "errors": []
    }

    # Process LinkedIn PDF
    if linkedin_pdf is not None:

        # Handle both file objects and mock file objects with path strings
        file_path = linkedin_pdf.name
        extraction_result = extract_text_from_linkedin_pdf(file_path)

        if extraction_result["status"] == "success":
            processed_data["linkedin"] = extraction_result
        else:
            processed_data["errors"].append(f"LinkedIn: {extraction_result['message']}")

    # Process GitHub profile
    if github_url and github_url.strip():
        github_result = get_github_repositories(github_url)

        if github_result["status"] == "success":
            processed_data["github"] = github_result
        else:
            processed_data["errors"].append(f"GitHub: {github_result['message']}")

    return processed_data
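

# --- Example wiring (illustrative only) --------------------------------------
# A minimal sketch of how process_with_default_option() could be hooked up to a
# Gradio Blocks UI. The project's real interface is presumably defined elsewhere;
# the component choices, labels, and the idea of running this module directly
# (e.g. `python -m functions.gradio` from the project root) are assumptions, not
# part of the module's public behaviour.
if __name__ == "__main__":

    import gradio as gr  # Deferred import so the helpers above stay UI-agnostic

    with gr.Blocks() as demo:
        use_default_pdf = gr.Checkbox(label="Use saved default LinkedIn PDF", value=True)
        linkedin_pdf = gr.File(label="LinkedIn resume export (PDF)", file_types=[".pdf"])
        github_profile = gr.Textbox(label="GitHub profile URL (leave blank for default)")
        job_post = gr.Textbox(label="Job post text", lines=10)
        user_instructions = gr.Textbox(label="Additional instructions", lines=3)

        # process_inputs() builds a plain-text status report, so a Textbox output
        # preserves its line breaks and emoji markers as-is.
        status = gr.Textbox(label="Processing status", lines=20)

        gr.Button("Generate resume").click(
            fn=process_with_default_option,
            inputs=[use_default_pdf, linkedin_pdf, github_profile, job_post, user_instructions],
            outputs=status,
        )

    demo.launch()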