Update app.py
app.py
CHANGED
@@ -1,20 +1,22 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-import PyPDF2
 import io
 from docx import Document
 import os
-import pymupdf
+import pymupdf
+# For PDF generation
 from reportlab.pdfgen import canvas
 from reportlab.lib.pagesizes import letter
 from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
 from reportlab.lib.styles import getSampleStyleSheet
 from reportlab.lib import colors

+# Initialize Hugging Face Inference Client with Meta-Llama-3.1-8B-Instruct
 client = InferenceClient(
     model="meta-llama/Meta-Llama-3-8B-Instruct",
     token=os.getenv("HF_TOKEN"))

+# Function to extract text from PDF
 def extract_text_from_pdf(pdf_file):
     try:
         pdf_document = pymupdf.open(pdf_file)
@@ -23,6 +25,7 @@ def extract_text_from_pdf(pdf_file):
     except Exception as e:
         return f"Error reading PDF: {e}"

+# Function to extract text from DOCX
 def extract_text_from_docx(docx_file):
     try:
         doc = Document(docx_file)
@@ -31,85 +34,118 @@ def extract_text_from_docx(docx_file):
     except Exception as e:
         return f"Error reading DOCX: {e}"

-
-    try:
-        response = client.text_generation(message, max_new_tokens=512)
-        history.append((message, response))
-        return "", history  # Return empty input and updated history
-    except Exception as e:
-        return "Error: Unable to generate response.", history
-
+# Function to analyze CV
 def parse_cv(file, job_description):
     if file is None:
         return "Please upload a CV file.", ""
     try:
         file_path = file.name
         file_ext = os.path.splitext(file_path)[1].lower()
-
+        if file_ext == ".pdf":
+            extracted_text = extract_text_from_pdf(file_path)
+        elif file_ext == ".docx":
+            extracted_text = extract_text_from_docx(file_path)
+        else:
+            return "Unsupported file format. Please upload a PDF or DOCX file.", ""
     except Exception as e:
         return f"Error reading file: {e}", ""
     if extracted_text.startswith("Error"):
-        return extracted_text, "Error during text extraction."
-    prompt =
+        return extracted_text, "Error during text extraction. Please check the file."
+    prompt = (
+        f"Analyze the CV against the job description. Provide a summary, assessment, "
+        f"and a score 0-10.\n\n"
+        f"Job Description:\n{job_description}\n\n"
+        f"Candidate CV:\n{extracted_text}\n"
+    )
     try:
         analysis = client.text_generation(prompt, max_new_tokens=512)
-        return extracted_text, f"
+        return extracted_text, f"--- Analysis Report ---\n{analysis}"
     except Exception as e:
         return extracted_text, f"Analysis Error: {e}"

+# Function to toggle the download button
+def toggle_download_button(analysis_report):
+    return gr.update(interactive=bool(analysis_report.strip()), visible=bool(analysis_report.strip()))
+
+# Function to create PDF report
 def create_pdf_report(report_text):
+    if not report_text.strip():
+        report_text = "No analysis report to convert."
+
     pdf_buffer = io.BytesIO()
     doc = SimpleDocTemplate(pdf_buffer, pagesize=letter)
     styles = getSampleStyleSheet()
-    Story = [
-
+    Story = []
+
+    title = Paragraph("<b>Analysis Report</b>", styles['Title'])
+    Story.append(title)
+    Story.append(Spacer(1, 12))
+
+    report_paragraph = Paragraph(report_text.replace("\n", "<br/>"), styles['BodyText'])
+    Story.append(report_paragraph)
+
     doc.build(Story)
     pdf_buffer.seek(0)
-    return pdf_buffer
+    return (pdf_buffer, "analysis_report.pdf")  # Return as tuple for gr.File

 def process_resume(resume_file, job_title):
+    """
+    Processes the uploaded resume, optimizes it for the given job title using the LLM,
+    and returns the optimized resume content.
+    """
     if resume_file is None:
         return "Please upload a resume file."
+
     try:
         file_path = resume_file.name
         file_ext = os.path.splitext(file_path)[1].lower()
-
+
+        if file_ext == ".pdf":
+            resume_text = extract_text_from_pdf(file_path)
+        elif file_ext == ".docx":
+            resume_text = extract_text_from_docx(file_path)
+        else:
+            return "Unsupported file format. Please upload a PDF or DOCX file."
+
         if resume_text.startswith("Error"):
             return resume_text
-
+
+        prompt = (
+            f"Optimize the following resume for the job title: {job_title}.\n"
+            f"Include relevant skills, experience, and keywords related to the job title.\n\n"
+            f"Resume:\n{resume_text}\n"
+        )
+
         optimized_resume = client.text_generation(prompt, max_new_tokens=1024)
-        return optimized_resume
+        return optimized_resume
+
     except Exception as e:
         return f"Error processing resume: {e}"
-
+# Build the Gradio UI
 demo = gr.Blocks()
 with demo:
-    gr.Markdown("## AI-powered CV Analyzer
-
-    with gr.Tab("Chatbot"):
-        chat_interface = gr.ChatInterface(
-            chatbot_response,
-            chatbot=gr.Chatbot(label="Chatbot"),
-            textbox=gr.Textbox(placeholder="Enter your message here...", label="Message"),
-        )
-
+    gr.Markdown("## AI-powered CV Analyzer and Optimizer")
     with gr.Tab("CV Analyzer"):
+        gr.Markdown("### Upload your CV and provide the job description")
         file_input = gr.File(label="Upload CV", file_types=[".pdf", ".docx"])
         job_desc_input = gr.Textbox(label="Job Description", lines=5)
         extracted_text = gr.Textbox(label="Extracted CV Content", lines=10, interactive=False)
-        analysis_output = gr.
-        download_pdf_button = gr.Button("Download Analysis as PDF", visible=False)
-        pdf_file = gr.File(label="Download PDF", interactive=False)
+        analysis_output = gr.Textbox(label="Analysis Report", lines=10, interactive=False)
+        download_pdf_button = gr.Button("Download Analysis as PDF", visible=False, interactive=False)
+        pdf_file = gr.File(label="Download PDF", interactive=False)  # Changed to gr.File
         analyze_button = gr.Button("Analyze CV")
+
         analyze_button.click(parse_cv, [file_input, job_desc_input], [extracted_text, analysis_output])
+        analyze_button.click(toggle_download_button, [analysis_output], [download_pdf_button])
         download_pdf_button.click(create_pdf_report, [analysis_output], [pdf_file])
-
     with gr.Tab("CV Optimizer"):
+        gr.Markdown("### Upload your Resume and Enter Job Title")
         resume_file = gr.File(label="Upload Resume (PDF or Word)", file_types=[".pdf", ".docx"])
-        job_title_input = gr.Textbox(label="Job Title")
-        optimized_resume_output = gr.
+        job_title_input = gr.Textbox(label="Job Title", lines=1)
+        optimized_resume_output = gr.Textbox(label="Optimized Resume", lines=20)
         optimize_button = gr.Button("Optimize Resume")
+
         optimize_button.click(process_resume, [resume_file, job_title_input], [optimized_resume_output])

 if __name__ == "__main__":
-    demo.queue().launch()
+    demo.queue().launch()
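
After this change, create_pdf_report() returns a (BytesIO, filename) tuple instead of a bare buffer. Below is a minimal offline sanity check of that new return value; it assumes the updated file is saved as app.py with its imports (gradio, huggingface_hub, python-docx, pymupdf, reportlab) installed, and it never calls the InferenceClient, so no HF_TOKEN is needed. Whether gr.File accepts an in-memory (buffer, name) tuple can vary by Gradio version; this sketch only verifies the generated PDF bytes. The helper script name is hypothetical, not part of the Space.

# check_pdf_report.py -- hypothetical helper, not part of this Space.
# Imports the updated app.py and exercises create_pdf_report() offline.
from app import create_pdf_report

pdf_buffer, filename = create_pdf_report("Sample analysis line 1\nSample analysis line 2")
with open(filename, "wb") as f:
    f.write(pdf_buffer.read())  # write the generated PDF bytes to disk
print(f"Wrote {filename}")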