"""
gradio.py
Functions for handling Gradio UI interactions and processing user inputs.
"""
import logging
from pathlib import Path
from functions.helper import clean_text_whitespace
from functions.linkedin_resume import extract_text
# from functions.github import get_github_repositories
# from functions.job_call import summarize_job_call
# from functions.writer_agent import write_resume
# pylint: disable=broad-exception-caught
# Set up logging
# Create logs directory if it doesn't exist
logs_dir = Path(__file__).parent.parent / "logs"
logs_dir.mkdir(exist_ok=True)

# Strip extraneous handlers
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)

# Configure logging to write to file and console
logging.basicConfig(
    level=logging.INFO,
    format='%(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(logs_dir / "gradio.log", mode='w'),  # Log to file
        logging.StreamHandler()  # Also log to console
    ]
)
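
# Because basicConfig attaches these handlers to the root logger, namespaced child
# loggers created elsewhere (e.g. logging.getLogger(f"{__name__}.process_inputs")
# below) propagate their records to both logs/gradio.log and the console without
# any extra per-module setup.
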
def process_inputs(
    linkedin_pdf_path: str = None,
    github_url: str = None,
    job_post_text: str = None,
    user_instructions: str = None
):
    """
    Process the input files and URLs from the Gradio interface.

    Args:
        linkedin_pdf_path (str): Path to the uploaded LinkedIn resume export PDF file
        github_url (str): GitHub profile URL
        job_post_text (str): Job post text content
        user_instructions (str): Additional instructions from the user

    Returns:
        str: Formatted output with file and URL information
    """
    logger = logging.getLogger(f'{__name__}.process_inputs')
    logger.info("LinkedIn PDF: %s", linkedin_pdf_path)
    logger.info("GitHub URL: %s", github_url)
    logger.info(
        "Job post: %s",
        clean_text_whitespace(job_post_text[:100]).replace("\n", " ") if job_post_text else "None"
    )
    logger.info("User instructions: %s", user_instructions[:100] if user_instructions else "None")

    result = ""

    # Extract and structure text from the LinkedIn profile PDF
    logger.info("Extracting text from LinkedIn PDF: %s", linkedin_pdf_path)
    extraction_result = extract_text(linkedin_pdf_path)

    if extraction_result:
        logger.info("LinkedIn PDF text extraction successful")
    else:
        logger.error("LinkedIn PDF text extraction failed")
    # if extraction_result["status"] == "success":
    #     result += " ✅ Text extraction successful\n\n"
    #     logger.info("LinkedIn PDF text extraction successful")
    # elif extraction_result["status"] == "warning":
    #     result += f" ⚠️ Text extraction: {extraction_result['message']}\n\n"
    #     logger.warning("LinkedIn PDF extraction warning: %s", extraction_result['message'])
    # else:
    #     result += f" ❌ Text extraction failed: {extraction_result['message']}\n\n"
    #     logger.error("LinkedIn PDF extraction failed: %s", extraction_result['message'])

    # # Process GitHub profile
    # if github_url and github_url.strip():
    #     result += "✅ GitHub Profile URL provided\n"
    #     logger.info("Processing GitHub URL: %s", github_url.strip())

    #     # Retrieve repositories from GitHub
    #     github_result = get_github_repositories(github_url.strip())

    #     if github_result["status"] == "success":
    #         result += " ✅ GitHub list download successful\n\n"
    #         logger.info(
    #             "GitHub repositories retrieved successfully for %s",
    #             github_result['metadata']['username']
    #         )
    #     else:
    #         result += f" ❌ GitHub extraction failed: {github_result['message']}\n\n"
    #         logger.error("GitHub extraction failed: %s", github_result['message'])
    # else:
    #     result += "❌ No GitHub profile URL provided\n\n"
    #     logger.info("No GitHub URL provided")

    # # Process job post text
    # if job_post_text and job_post_text.strip():
    #     result += "✅ Job post text provided\n"
    #     logger.info("Job post text provided (%d characters)", len(job_post_text))

    #     summary = summarize_job_call(job_post_text.strip())
    #     result += " ✅ Job post summary generated\n"
    #     logger.info("Job post summary generated (%d characters)", len(summary))
    # else:
    #     result += "❌ Job post not provided\n"
    #     logger.info("No job post text provided")
    #     summary = None

    # # Process user instructions
    # if user_instructions and user_instructions.strip():
    #     result += "✅ Additional instructions provided\n"
    #     logger.info("User instructions provided (%d characters)", len(user_instructions))
    # else:
    #     result += "ℹ️ No additional instructions provided\n"
    #     logger.info("No additional instructions provided")

    # logger.info("Input processing completed")

    # # Generate resume only if we have valid extraction result
    # if extraction_result and extraction_result.get("status") == "success":
    #     try:
    #         _ = write_resume(extraction_result, user_instructions, summary)
    #         result += "\n✅ Resume generated successfully\n"
    #         logger.info("Resume generation completed successfully")
    #     except Exception as e:
    #         result += f"\n❌ Resume generation failed: {str(e)}\n"
    #         logger.error("Resume generation failed: %s", str(e))
    # else:
    #     result += "\n❌ Cannot generate resume: No valid LinkedIn data extracted\n"
    #     result += "Please ensure you upload a valid LinkedIn PDF export file.\n"
    #     logger.warning("Resume generation skipped - no valid LinkedIn data available")

    return result

# def get_processed_data(linkedin_pdf, github_url, job_post_text, instructions):
#     """
#     Get structured data from all inputs for further processing.
#
#     Args:
#         linkedin_pdf: Uploaded LinkedIn resume export PDF file
#         github_url (str): GitHub profile URL
#         job_post_text (str): Job post text content
#         instructions (str): Additional instructions from the user
#
#     Returns:
#         dict: Structured data containing all processed information
#     """
#     job_post_text = job_post_text.strip() if job_post_text and job_post_text.strip() else None
#     instructions = instructions.strip() if instructions and instructions.strip() else None
#
#     processed_data = {
#         "linkedin": None,
#         "github": None,
#         "job_post": job_post_text,
#         "user_instructions": instructions,
#         "errors": []
#     }
#
#     # Process LinkedIn PDF
#     if linkedin_pdf is not None:
#         file_path = linkedin_pdf.name
#         extraction_result = extract_text_from_linkedin_pdf(file_path)
#
#         if extraction_result["status"] == "success":
#             processed_data["linkedin"] = extraction_result
#         else:
#             processed_data["errors"].append(f"LinkedIn: {extraction_result['message']}")
#
#     # Process GitHub profile
#     if github_url and github_url.strip():
#         github_result = get_github_repositories(github_url)
#
#         if github_result["status"] == "success":
#             processed_data["github"] = github_result
#         else:
#             processed_data["errors"].append(f"GitHub: {github_result['message']}")
#
#     return processed_data
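
# Illustrative wiring sketch (an assumption, not confirmed by this module):
# process_inputs could be bound to a Gradio Blocks UI from a separate entry point
# such as app.py. The import path and component labels below are guesses based on
# the imports and log paths above.
#
# import gradio as gr
# from functions.gradio import process_inputs
#
# with gr.Blocks() as demo:
#     linkedin_pdf = gr.File(label="LinkedIn resume export (PDF)", type="filepath")
#     github_url = gr.Textbox(label="GitHub profile URL")
#     job_post_text = gr.Textbox(label="Job post text", lines=10)
#     user_instructions = gr.Textbox(label="Additional instructions", lines=4)
#     status = gr.Textbox(label="Status")
#     gr.Button("Generate resume").click(
#         fn=process_inputs,
#         inputs=[linkedin_pdf, github_url, job_post_text, user_instructions],
#         outputs=status,
#     )
#
# demo.launch()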