Update app.py
app.py CHANGED
@@ -4,6 +4,10 @@ import PyPDF2
 import io
 from docx import Document
 
+# For PDF generation
+from reportlab.pdfgen import canvas
+from reportlab.lib.pagesizes import letter
+
 # Initialize the inference client from Hugging Face.
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
@@ -30,10 +34,10 @@ def extract_text_from_docx(docx_file_bytes):
         return f"Error reading DOCX: {e}"
 
 def parse_cv(file, job_description):
-    """Analyze the CV
+    """Analyze the CV, show the prompt (debug) and return LLM analysis."""
     if file is None:
         return "Please upload a CV file.", ""
-
+
     try:
         file_bytes = file
         file_ext = "pdf"
@@ -52,21 +56,28 @@ def parse_cv(file, job_description):
            extracted_text = extract_text_from_pdf(file_bytes)
        elif file_ext == "docx":
            extracted_text = extract_text_from_docx(file_bytes)
-
+
        # Check for extraction errors
        if extracted_text.startswith("Error"):
            return extracted_text, "Error during text extraction. Please check the file."
 
-        # Prepare
+        # Prepare debug prompt
        prompt = (
-            f"Analyze the CV against the job description. Provide a summary, assessment,
+            f"Analyze the CV against the job description. Provide a summary, assessment, "
+            f"and a score 0-10.\n\n"
            f"Job Description:\n{job_description}\n\n"
-            f"Candidate CV:\n{extracted_text}"
+            f"Candidate CV:\n{extracted_text}\n"
        )
 
+        # Call LLM
        try:
            analysis = client.text_generation(prompt, max_new_tokens=512)
-
+            # Show both the debug prompt and analysis in the "Analysis Report"
+            analysis_report = (
+                f"--- DEBUG PROMPT ---\n{prompt}\n"
+                f"--- LLM ANALYSIS ---\n{analysis}"
+            )
+            return extracted_text, analysis_report
        except Exception as e:
            return extracted_text, f"Analysis Error: {e}"
 
@@ -95,7 +106,38 @@ def respond(message, history: list[tuple[str, str]], system_message, max_tokens,
     except Exception as e:
         yield f"Error during chat generation: {e}"
 
-
+
+def create_pdf_report(report_text):
+    """
+    Creates a PDF from the given report text and returns (filename, PDF_bytes).
+    """
+    if not report_text.strip():
+        # If there's no report, just return a basic PDF stating so
+        report_text = "No analysis report to convert."
+
+    # Use ReportLab to generate PDF in-memory
+    pdf_buffer = io.BytesIO()
+    c = canvas.Canvas(pdf_buffer, pagesize=letter)
+
+    # Title
+    c.setFont("Helvetica-Bold", 14)
+    c.drawString(72, 750, "Analysis Report")
+
+    # Body
+    text_obj = c.beginText(72, 730)
+    text_obj.setFont("Helvetica", 11)
+    for line in report_text.split("\n"):
+        text_obj.textLine(line)
+    c.drawText(text_obj)
+
+    c.showPage()
+    c.save()
+    pdf_buffer.seek(0)
+
+    return ("analysis_report.pdf", pdf_buffer.getvalue())
+
+
+# Build the Gradio UI
 demo = gr.Blocks()
 with demo:
     gr.Markdown("## AI-powered CV Analyzer and Chatbot")
@@ -119,13 +161,23 @@ with demo:
        job_desc_input = gr.Textbox(label="Job Description", lines=5)
        extracted_text = gr.Textbox(label="Extracted CV Content", lines=10, interactive=False)
        analysis_output = gr.Textbox(label="Analysis Report", lines=10)
-        analyze_button = gr.Button("Analyze CV")
 
+        analyze_button = gr.Button("Analyze CV")
        analyze_button.click(
            parse_cv,
            inputs=[file_input, job_desc_input],
            outputs=[extracted_text, analysis_output]
        )
 
+        # Button to generate/download PDF
+        download_pdf_button = gr.Button("Download Analysis as PDF")
+        pdf_file = gr.File(label="Download PDF", file_count="single", interactive=False)
+
+        download_pdf_button.click(
+            create_pdf_report,
+            inputs=[analysis_output],  # pass the analysis text
+            outputs=[pdf_file]  # this will allow downloading the PDF
+        )
+
 if __name__ == "__main__":
-    demo.queue().launch(
+    demo.queue().launch()
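
For a quick local check of the new PDF path outside the Space, a minimal sketch along these lines could be used (assuming reportlab is installed and app.py is importable; the save_report_locally helper below is hypothetical and not part of this commit):

    # Hypothetical local test for create_pdf_report; not part of the commit.
    from app import create_pdf_report

    def save_report_locally(report_text: str, out_path: str = "analysis_report.pdf") -> str:
        """Write the PDF bytes returned by create_pdf_report to disk and return the path."""
        _suggested_name, pdf_bytes = create_pdf_report(report_text)
        with open(out_path, "wb") as f:
            f.write(pdf_bytes)
        return out_path

    if __name__ == "__main__":
        print(save_report_locally("Sample analysis\nLine two of the report"))

This only exercises the ReportLab rendering; whether Gradio's gr.File output component accepts the (filename, bytes) tuple returned by create_pdf_report still needs to be verified in the running Space.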