Cristian Martinez
committed on
Commit
·
53c82bf
1
Parent(s):
3e16bc6
improve UI and enable LLM selection
Browse files- app.py +208 -33
- llm_client.py +35 -0
- requirements.txt +1 -1
app.py
CHANGED
@@ -4,7 +4,7 @@ import os
|
|
4 |
import re
|
5 |
import json
|
6 |
from dotenv import load_dotenv
|
7 |
-
from
|
8 |
from pypdf import PdfReader
|
9 |
import gradio as gr
|
10 |
|
@@ -26,17 +26,41 @@ def read_resume(file_obj):
|
|
26 |
else:
|
27 |
return file_obj.read().decode("utf-8")
|
28 |
|
29 |
-
def analyze_resume(
|
30 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
prompt = {
|
32 |
"role": "user",
|
33 |
"content": (
|
34 |
"You are an expert career coach.\n\n"
|
35 |
f"**Job Title:** {job_title}\n\n"
|
36 |
"**Job Description (first 5k chars):**\n```\n"
|
37 |
-
+
|
38 |
-
"**Candidate
|
39 |
-
+
|
40 |
"Reply only in JSON with these keys:\n"
|
41 |
" overall_score (int 1β10),\n"
|
42 |
" interview_likelihood (\"High\"/\"Medium\"/\"Low\"),\n"
|
@@ -47,11 +71,10 @@ def analyze_resume(resume: str, job_title: str, job_desc: str) -> dict:
|
|
47 |
" strengths [β¦],\n"
|
48 |
" missing_skills [β¦],\n"
|
49 |
" improvement_areas [β¦],\n"
|
50 |
-
" recommendations [β¦]
|
51 |
)
|
52 |
}
|
53 |
-
resp =
|
54 |
-
model="gpt-4o-mini",
|
55 |
messages=[prompt]
|
56 |
)
|
57 |
raw = resp.choices[0].message.content
|
@@ -99,7 +122,6 @@ def format_recs(res):
|
|
99 |
# βββ Main Logic & UI βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
100 |
|
101 |
load_dotenv()
|
102 |
-
openai = OpenAI()
|
103 |
|
104 |
def run_agent(resume_file, job_title, job_desc):
|
105 |
# Validate inputs
|
@@ -110,7 +132,6 @@ def run_agent(resume_file, job_title, job_desc):
|
|
110 |
# Read and analyze
|
111 |
resume = read_resume(resume_file)
|
112 |
result = analyze_resume(resume, job_title, job_desc)
|
113 |
-
|
114 |
# Handle parse errors
|
115 |
if "error" in result:
|
116 |
err_md = f"**Error:** {result['error']}\n\n```{result.get('raw','')}```"
|
@@ -126,32 +147,186 @@ def run_agent(resume_file, job_title, job_desc):
|
|
126 |
return summary_md, overview_md, strengths_md, gaps_md, recs_md
|
127 |
|
128 |
# Build the Gradio app
|
129 |
-
app = gr.Blocks(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
130 |
|
131 |
with app:
|
132 |
-
gr.Markdown(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
133 |
|
134 |
with gr.Row():
|
135 |
# ββ Sidebar for inputs βββββββββββββββββββββββββ
|
136 |
-
with gr.Column(scale=1):
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
142 |
|
143 |
# ββ Main area for results βββββββββββββββββββββββ
|
144 |
-
with gr.Column(scale=2):
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
with gr.TabItem("
|
154 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
155 |
|
156 |
# Wire up the callback
|
157 |
run_btn.click(
|
@@ -171,6 +346,6 @@ if __name__ == "__main__":
|
|
171 |
app.launch(
|
172 |
server_name="0.0.0.0",
|
173 |
server_port=7860,
|
174 |
-
debug=True
|
175 |
-
|
176 |
-
)
|
|
|
4 |
import re
|
5 |
import json
|
6 |
from dotenv import load_dotenv
|
7 |
+
from llm_client import LLMClient
|
8 |
from pypdf import PdfReader
|
9 |
import gradio as gr
|
10 |
|
|
|
26 |
else:
|
27 |
return file_obj.read().decode("utf-8")
|
28 |
|
29 |
+
def analyze_resume(resume_content: str, job_title: str, job_description: str) -> dict:
|
30 |
+
"""Analyzes a candidate's resume against a specific job title and description.
|
31 |
+
|
32 |
+
This tool evaluates the compatibility of a resume with a given job, providing a comprehensive assessment including an overall score, interview likelihood, matching skills, and areas for improvement.
|
33 |
+
|
34 |
+
Args:
|
35 |
+
resume_content (str): The full text content of the candidate's resume.
|
36 |
+
job_title (str): The title of the job for which the resume is being evaluated.
|
37 |
+
job_description (str): The detailed description of the job, including responsibilities and requirements.
|
38 |
+
|
39 |
+
Returns:
|
40 |
+
dict: A JSON object containing the analysis results with the following keys:
|
41 |
+
- overall_score (int): A score from 1-10 indicating overall fit.
|
42 |
+
- interview_likelihood (str): "High", "Medium", or "Low" likelihood of an interview.
|
43 |
+
- matching_skills (list[str]): A list of skills from the resume that match the job description.
|
44 |
+
- evaluation_summary (str): A concise summary of the resume's evaluation.
|
45 |
+
- experience_match (str): Assessment of how well the candidate's experience matches the job.
|
46 |
+
- education_match (str): Assessment of how well the candidate's education matches the job.
|
47 |
+
- strengths (list[str]): Key strengths identified in the resume relevant to the job.
|
48 |
+
- missing_skills (list[str]): Skills required by the job but missing from the resume.
|
49 |
+
- improvement_areas (list[str]): Areas where the resume could be improved for better fit.
|
50 |
+
- recommendations (list[str]): Actionable recommendations for the candidate.
|
51 |
+
"""
|
52 |
+
llm_provider = os.getenv("LLM_PROVIDER", "openai")
|
53 |
+
llm_model = os.getenv("LLM_MODEL", "gpt-4o-mini")
|
54 |
+
llm_client = LLMClient(provider=llm_provider, model=llm_model)
|
55 |
prompt = {
|
56 |
"role": "user",
|
57 |
"content": (
|
58 |
"You are an expert career coach.\n\n"
|
59 |
f"**Job Title:** {job_title}\n\n"
|
60 |
"**Job Description (first 5k chars):**\n```\n"
|
61 |
+
+ job_description.strip()[:5000] + "\n```\n\n"
|
62 |
+
"**Candidate's Resume** (first 5k chars):\n```\n"
|
63 |
+
+ resume_content[:5000] + "\n```\n\n"
|
64 |
"Reply only in JSON with these keys:\n"
|
65 |
" overall_score (int 1β10),\n"
|
66 |
" interview_likelihood (\"High\"/\"Medium\"/\"Low\"),\n"
|
|
|
71 |
" strengths [β¦],\n"
|
72 |
" missing_skills [β¦],\n"
|
73 |
" improvement_areas [β¦],\n"
|
74 |
+
" recommendations [β¦]"
|
75 |
)
|
76 |
}
|
77 |
+
resp = llm_client.chat_completion(
|
|
|
78 |
messages=[prompt]
|
79 |
)
|
80 |
raw = resp.choices[0].message.content
|
|
|
122 |
# βββ Main Logic & UI βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
123 |
|
124 |
load_dotenv()
|
|
|
125 |
|
126 |
def run_agent(resume_file, job_title, job_desc):
|
127 |
# Validate inputs
|
|
|
132 |
# Read and analyze
|
133 |
resume = read_resume(resume_file)
|
134 |
result = analyze_resume(resume, job_title, job_desc)
|
|
|
135 |
# Handle parse errors
|
136 |
if "error" in result:
|
137 |
err_md = f"**Error:** {result['error']}\n\n```{result.get('raw','')}```"
|
|
|
147 |
return summary_md, overview_md, strengths_md, gaps_md, recs_md
|
148 |
|
149 |
# Build the Gradio app
|
150 |
+
app = gr.Blocks(
|
151 |
+
title="Skills Gap Advisor",
|
152 |
+
theme=gr.themes.Soft(
|
153 |
+
primary_hue="indigo",
|
154 |
+
secondary_hue="purple",
|
155 |
+
neutral_hue="slate",
|
156 |
+
font=gr.themes.GoogleFont("Inter")
|
157 |
+
),
|
158 |
+
css="""
|
159 |
+
.gradio-container {
|
160 |
+
max-width: 1200px !important;
|
161 |
+
margin: 0 auto !important;
|
162 |
+
background: #0f172a !important;
|
163 |
+
color: #e2e8f0 !important;
|
164 |
+
}
|
165 |
+
.main-header {
|
166 |
+
text-align: center;
|
167 |
+
margin-bottom: 2rem;
|
168 |
+
padding: 2rem 0;
|
169 |
+
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%);
|
170 |
+
color: #ffffff !important;
|
171 |
+
border-radius: 12px;
|
172 |
+
margin-bottom: 2rem;
|
173 |
+
box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
|
174 |
+
}
|
175 |
+
.input-section {
|
176 |
+
background: #1e293b !important;
|
177 |
+
padding: 1.5rem;
|
178 |
+
border-radius: 12px;
|
179 |
+
border: 1px solid #334155;
|
180 |
+
margin-bottom: 1rem;
|
181 |
+
box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
|
182 |
+
}
|
183 |
+
.results-section {
|
184 |
+
background: #1e293b !important;
|
185 |
+
border-radius: 12px;
|
186 |
+
border: 1px solid #334155;
|
187 |
+
overflow: hidden;
|
188 |
+
box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
|
189 |
+
}
|
190 |
+
.summary-card {
|
191 |
+
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%);
|
192 |
+
color: #ffffff !important;
|
193 |
+
padding: 1.5rem;
|
194 |
+
border-radius: 12px;
|
195 |
+
margin-bottom: 1rem;
|
196 |
+
box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
|
197 |
+
}
|
198 |
+
.tabs {
|
199 |
+
background: #1e293b !important;
|
200 |
+
border-radius: 12px;
|
201 |
+
padding: 1rem;
|
202 |
+
}
|
203 |
+
.tab-nav {
|
204 |
+
border-bottom: 1px solid #334155 !important;
|
205 |
+
}
|
206 |
+
.tab-nav button {
|
207 |
+
color: #94a3b8 !important;
|
208 |
+
}
|
209 |
+
.tab-nav button.selected {
|
210 |
+
color: #4f46e5 !important;
|
211 |
+
border-bottom: 2px solid #4f46e5 !important;
|
212 |
+
}
|
213 |
+
.tab-content {
|
214 |
+
background: #1e293b !important;
|
215 |
+
color: #e2e8f0 !important;
|
216 |
+
padding: 1rem;
|
217 |
+
}
|
218 |
+
.markdown {
|
219 |
+
color: #e2e8f0 !important;
|
220 |
+
}
|
221 |
+
.markdown h1, .markdown h2, .markdown h3 {
|
222 |
+
color: #ffffff !important;
|
223 |
+
}
|
224 |
+
.markdown strong {
|
225 |
+
color: #4f46e5 !important;
|
226 |
+
}
|
227 |
+
.markdown ul li {
|
228 |
+
color: #e2e8f0 !important;
|
229 |
+
}
|
230 |
+
.accordion {
|
231 |
+
background: #1e293b !important;
|
232 |
+
border: 1px solid #334155 !important;
|
233 |
+
}
|
234 |
+
.accordion-title {
|
235 |
+
color: #e2e8f0 !important;
|
236 |
+
}
|
237 |
+
.accordion-content {
|
238 |
+
color: #94a3b8 !important;
|
239 |
+
}
|
240 |
+
.button-primary {
|
241 |
+
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%) !important;
|
242 |
+
color: white !important;
|
243 |
+
border: none !important;
|
244 |
+
box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1) !important;
|
245 |
+
}
|
246 |
+
.button-primary:hover {
|
247 |
+
background: linear-gradient(135deg, #4338ca 0%, #6d28d9 100%) !important;
|
248 |
+
transform: translateY(-1px);
|
249 |
+
box-shadow: 0 6px 8px -1px rgb(0 0 0 / 0.1) !important;
|
250 |
+
}
|
251 |
+
"""
|
252 |
+
)
|
253 |
|
254 |
with app:
|
255 |
+
gr.Markdown(
|
256 |
+
"""
|
257 |
+
<div class="main-header">
|
258 |
+
<h1>π€ Skills Gap Advisor</h1>
|
259 |
+
<p>AI-Powered Resume Analysis & Career Guidance</p>
|
260 |
+
<p><em>Powered by configurable LLM providers β’ Available as MCP Tool</em></p>
|
261 |
+
</div>
|
262 |
+
""",
|
263 |
+
elem_classes=["main-header"]
|
264 |
+
)
|
265 |
|
266 |
with gr.Row():
|
267 |
# ββ Sidebar for inputs βββββββββββββββββββββββββ
|
268 |
+
with gr.Column(scale=1, elem_classes=["input-section"]):
|
269 |
+
gr.Markdown("### π Job & Resume Information")
|
270 |
+
title_input = gr.Textbox(
|
271 |
+
label="π·οΈ Job Title",
|
272 |
+
placeholder="e.g. Senior Data Scientist",
|
273 |
+
info="Enter the exact job title you're applying for"
|
274 |
+
)
|
275 |
+
job_input = gr.Textbox(
|
276 |
+
label="π Job Description",
|
277 |
+
lines=6,
|
278 |
+
placeholder="Copy-paste the complete job description here...",
|
279 |
+
info="Include requirements, responsibilities, and qualifications"
|
280 |
+
)
|
281 |
+
resume_input = gr.File(
|
282 |
+
label="π Resume Upload",
|
283 |
+
file_types=[".pdf", ".txt"]
|
284 |
+
)
|
285 |
+
|
286 |
+
with gr.Accordion("βοΈ Advanced Settings", open=False):
|
287 |
+
gr.Markdown("**LLM Configuration** (via environment variables)")
|
288 |
+
gr.Markdown(
|
289 |
+
"""
|
290 |
+
- `LLM_PROVIDER`: openai, gemini, deepseek
|
291 |
+
- `LLM_MODEL`: Model name (e.g., gpt-4o-mini, gemini-2.0-flash, deepseek-chat)
|
292 |
+
- API keys: `OPENAI_API_KEY`, `GEMINI_API_KEY`, `DEEPSEEK_API_KEY`
|
293 |
+
"""
|
294 |
+
)
|
295 |
+
|
296 |
+
run_btn = gr.Button(
|
297 |
+
"π Analyze Resume",
|
298 |
+
variant="primary",
|
299 |
+
size="lg",
|
300 |
+
scale=1,
|
301 |
+
elem_classes=["button-primary"]
|
302 |
+
)
|
303 |
|
304 |
# ββ Main area for results βββββββββββββββββββββββ
|
305 |
+
with gr.Column(scale=2, elem_classes=["results-section"]):
|
306 |
+
gr.Markdown("### π Analysis Results")
|
307 |
+
|
308 |
+
summary_display = gr.Markdown(
|
309 |
+
elem_classes=["summary-card"],
|
310 |
+
value="Upload a resume and job description to see the analysis results here."
|
311 |
+
)
|
312 |
+
|
313 |
+
with gr.Tabs(elem_classes=["tabs"]):
|
314 |
+
with gr.TabItem("π Overview", elem_id="overview-tab"):
|
315 |
+
overview_display = gr.Markdown(
|
316 |
+
value="Detailed evaluation summary will appear here after analysis."
|
317 |
+
)
|
318 |
+
with gr.TabItem("πͺ Strengths", elem_id="strengths-tab"):
|
319 |
+
strengths_display = gr.Markdown(
|
320 |
+
value="Your matching skills and key strengths will be highlighted here."
|
321 |
+
)
|
322 |
+
with gr.TabItem("π― Gaps", elem_id="gaps-tab"):
|
323 |
+
gaps_display = gr.Markdown(
|
324 |
+
value="Missing skills and improvement areas will be identified here."
|
325 |
+
)
|
326 |
+
with gr.TabItem("π‘ Recommendations", elem_id="recommendations-tab"):
|
327 |
+
recs_display = gr.Markdown(
|
328 |
+
value="Personalized recommendations for improving your application will appear here."
|
329 |
+
)
|
330 |
|
331 |
# Wire up the callback
|
332 |
run_btn.click(
|
|
|
346 |
app.launch(
|
347 |
server_name="0.0.0.0",
|
348 |
server_port=7860,
|
349 |
+
debug=True,
|
350 |
+
mcp_server=True
|
351 |
+
)
|
llm_client.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from openai import OpenAI
|
3 |
+
|
4 |
+
class LLMClient:
    """Thin wrapper around the OpenAI Python SDK that routes chat completions
    to one of several OpenAI-compatible providers (OpenAI, Gemini, DeepSeek).

    Gemini and DeepSeek expose OpenAI-compatible HTTP endpoints, so the same
    SDK client is reused with a provider-specific ``base_url`` and API key.
    """

    # Single source of truth for supported providers; keep _initialize_client()
    # in sync when adding a new entry.
    SUPPORTED_PROVIDERS = ("openai", "gemini", "deepseek")

    def __init__(self, provider: str = "openai", model: str = "gpt-4o-mini"):
        """Create a client for *provider* using *model*.

        Args:
            provider: One of ``SUPPORTED_PROVIDERS``.
            model: Model name understood by that provider.

        Raises:
            ValueError: If *provider* is not supported.
        """
        self.provider = provider
        self.model = model
        # Fails fast on an unsupported provider (see _initialize_client).
        self.client = self._initialize_client()

    def _initialize_client(self):
        """Build the underlying OpenAI SDK client for ``self.provider``.

        API keys are read from GEMINI_API_KEY / DEEPSEEK_API_KEY for the
        alternate providers; the plain OpenAI client resolves OPENAI_API_KEY
        from the environment itself.
        """
        if self.provider == "openai":
            return OpenAI()
        if self.provider == "gemini":
            return OpenAI(
                api_key=os.getenv("GEMINI_API_KEY"),
                base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
            )
        if self.provider == "deepseek":
            return OpenAI(
                api_key=os.getenv("DEEPSEEK_API_KEY"),
                base_url="https://api.deepseek.com/v1",
            )
        # Add other providers here later
        raise ValueError(f"Unsupported LLM provider: {self.provider}")

    def chat_completion(self, messages: list):
        """Send a chat-completion request and return the raw SDK response.

        Args:
            messages: Chat messages in OpenAI format
                (``[{"role": ..., "content": ...}, ...]``).

        Raises:
            ValueError: If ``self.provider`` is not supported.
        """
        if self.provider in self.SUPPORTED_PROVIDERS:
            return self.client.chat.completions.create(
                model=self.model,
                messages=messages,
            )
        # Add other providers' chat completion logic here later
        raise ValueError(f"Unsupported LLM provider: {self.provider}")
|
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
gradio
|
2 |
openai
|
3 |
pypdf
|
4 |
python-dotenv
|
|
|
1 |
+
gradio[mcp]
|
2 |
openai
|
3 |
pypdf
|
4 |
python-dotenv
|