Cristian Martinez committed on
Commit
53c82bf
Β·
1 Parent(s): 3e16bc6

improve UI and enable LLM selection

Browse files
Files changed (3) hide show
  1. app.py +208 -33
  2. llm_client.py +35 -0
  3. requirements.txt +1 -1
app.py CHANGED
@@ -4,7 +4,7 @@ import os
4
  import re
5
  import json
6
  from dotenv import load_dotenv
7
- from openai import OpenAI
8
  from pypdf import PdfReader
9
  import gradio as gr
10
 
@@ -26,17 +26,41 @@ def read_resume(file_obj):
26
  else:
27
  return file_obj.read().decode("utf-8")
28
 
29
- def analyze_resume(resume: str, job_title: str, job_desc: str) -> dict:
30
- """Call the LLM to evaluate the resume vs the job title & description."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  prompt = {
32
  "role": "user",
33
  "content": (
34
  "You are an expert career coach.\n\n"
35
  f"**Job Title:** {job_title}\n\n"
36
  "**Job Description (first 5k chars):**\n```\n"
37
- + job_desc.strip()[:5000] + "\n```\n\n"
38
- "**Candidate’s Resume** (first 5k chars):\n```\n"
39
- + resume[:5000] + "\n```\n\n"
40
  "Reply only in JSON with these keys:\n"
41
  " overall_score (int 1–10),\n"
42
  " interview_likelihood (\"High\"/\"Medium\"/\"Low\"),\n"
@@ -47,11 +71,10 @@ def analyze_resume(resume: str, job_title: str, job_desc: str) -> dict:
47
  " strengths […],\n"
48
  " missing_skills […],\n"
49
  " improvement_areas […],\n"
50
- " recommendations […]\n"
51
  )
52
  }
53
- resp = openai.chat.completions.create(
54
- model="gpt-4o-mini",
55
  messages=[prompt]
56
  )
57
  raw = resp.choices[0].message.content
@@ -99,7 +122,6 @@ def format_recs(res):
99
  # ─── Main Logic & UI ─────────────────────────────────────────────────────────────
100
 
101
  load_dotenv()
102
- openai = OpenAI()
103
 
104
  def run_agent(resume_file, job_title, job_desc):
105
  # Validate inputs
@@ -110,7 +132,6 @@ def run_agent(resume_file, job_title, job_desc):
110
  # Read and analyze
111
  resume = read_resume(resume_file)
112
  result = analyze_resume(resume, job_title, job_desc)
113
-
114
  # Handle parse errors
115
  if "error" in result:
116
  err_md = f"**Error:** {result['error']}\n\n```{result.get('raw','')}```"
@@ -126,32 +147,186 @@ def run_agent(resume_file, job_title, job_desc):
126
  return summary_md, overview_md, strengths_md, gaps_md, recs_md
127
 
128
  # Build the Gradio app
129
- app = gr.Blocks()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
  with app:
132
- gr.Markdown("## πŸ€– AI-Powered Resume vs Job Description Analyzer")
 
 
 
 
 
 
 
 
 
133
 
134
  with gr.Row():
135
  # ── Sidebar for inputs ─────────────────────────
136
- with gr.Column(scale=1):
137
- title_input = gr.Textbox(label="🏷️ Job Title", placeholder="e.g. Data Scientist")
138
- job_input = gr.Textbox(label="πŸ“‹ Job Description", lines=4,
139
- placeholder="Copy-paste the full JD here…")
140
- resume_input = gr.File(label="πŸ“„ Resume (PDF or TXT)")
141
- run_btn = gr.Button("πŸ” Analyze")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
  # ── Main area for results ───────────────────────
144
- with gr.Column(scale=2):
145
- summary_display = gr.Markdown()
146
- with gr.Tabs():
147
- with gr.TabItem("Overview"):
148
- overview_display = gr.Markdown()
149
- with gr.TabItem("Strengths"):
150
- strengths_display = gr.Markdown()
151
- with gr.TabItem("Gaps"):
152
- gaps_display = gr.Markdown()
153
- with gr.TabItem("Recommendations"):
154
- recs_display = gr.Markdown()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
 
156
  # Wire up the callback
157
  run_btn.click(
@@ -171,6 +346,6 @@ if __name__ == "__main__":
171
  app.launch(
172
  server_name="0.0.0.0",
173
  server_port=7860,
174
- debug=True
175
- # for hackathon submission, add mcp_server=True
176
- )
 
4
  import re
5
  import json
6
  from dotenv import load_dotenv
7
+ from llm_client import LLMClient
8
  from pypdf import PdfReader
9
  import gradio as gr
10
 
 
26
  else:
27
  return file_obj.read().decode("utf-8")
28
 
29
+ def analyze_resume(resume_content: str, job_title: str, job_description: str) -> dict:
30
+ """Analyzes a candidate's resume against a specific job title and description.
31
+
32
+ This tool evaluates the compatibility of a resume with a given job, providing a comprehensive assessment including an overall score, interview likelihood, matching skills, and areas for improvement.
33
+
34
+ Args:
35
+ resume_content (str): The full text content of the candidate's resume.
36
+ job_title (str): The title of the job for which the resume is being evaluated.
37
+ job_description (str): The detailed description of the job, including responsibilities and requirements.
38
+
39
+ Returns:
40
+ dict: A JSON object containing the analysis results with the following keys:
41
+ - overall_score (int): A score from 1-10 indicating overall fit.
42
+ - interview_likelihood (str): "High", "Medium", or "Low" likelihood of an interview.
43
+ - matching_skills (list[str]): A list of skills from the resume that match the job description.
44
+ - evaluation_summary (str): A concise summary of the resume's evaluation.
45
+ - experience_match (str): Assessment of how well the candidate's experience matches the job.
46
+ - education_match (str): Assessment of how well the candidate's education matches the job.
47
+ - strengths (list[str]): Key strengths identified in the resume relevant to the job.
48
+ - missing_skills (list[str]): Skills required by the job but missing from the resume.
49
+ - improvement_areas (list[str]): Areas where the resume could be improved for better fit.
50
+ - recommendations (list[str]): Actionable recommendations for the candidate.
51
+ """
52
+ llm_provider = os.getenv("LLM_PROVIDER", "openai")
53
+ llm_model = os.getenv("LLM_MODEL", "gpt-4o-mini")
54
+ llm_client = LLMClient(provider=llm_provider, model=llm_model)
55
  prompt = {
56
  "role": "user",
57
  "content": (
58
  "You are an expert career coach.\n\n"
59
  f"**Job Title:** {job_title}\n\n"
60
  "**Job Description (first 5k chars):**\n```\n"
61
+ + job_description.strip()[:5000] + "\n```\n\n"
62
+ "**Candidate's Resume** (first 5k chars):\n```\n"
63
+ + resume_content[:5000] + "\n```\n\n"
64
  "Reply only in JSON with these keys:\n"
65
  " overall_score (int 1–10),\n"
66
  " interview_likelihood (\"High\"/\"Medium\"/\"Low\"),\n"
 
71
  " strengths […],\n"
72
  " missing_skills […],\n"
73
  " improvement_areas […],\n"
74
+ " recommendations […]"
75
  )
76
  }
77
+ resp = llm_client.chat_completion(
 
78
  messages=[prompt]
79
  )
80
  raw = resp.choices[0].message.content
 
122
  # ─── Main Logic & UI ─────────────────────────────────────────────────────────────
123
 
124
  load_dotenv()
 
125
 
126
  def run_agent(resume_file, job_title, job_desc):
127
  # Validate inputs
 
132
  # Read and analyze
133
  resume = read_resume(resume_file)
134
  result = analyze_resume(resume, job_title, job_desc)
 
135
  # Handle parse errors
136
  if "error" in result:
137
  err_md = f"**Error:** {result['error']}\n\n```{result.get('raw','')}```"
 
147
  return summary_md, overview_md, strengths_md, gaps_md, recs_md
148
 
149
  # Build the Gradio app
150
+ app = gr.Blocks(
151
+ title="Skills Gap Advisor",
152
+ theme=gr.themes.Soft(
153
+ primary_hue="indigo",
154
+ secondary_hue="purple",
155
+ neutral_hue="slate",
156
+ font=gr.themes.GoogleFont("Inter")
157
+ ),
158
+ css="""
159
+ .gradio-container {
160
+ max-width: 1200px !important;
161
+ margin: 0 auto !important;
162
+ background: #0f172a !important;
163
+ color: #e2e8f0 !important;
164
+ }
165
+ .main-header {
166
+ text-align: center;
167
+ margin-bottom: 2rem;
168
+ padding: 2rem 0;
169
+ background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%);
170
+ color: #ffffff !important;
171
+ border-radius: 12px;
172
+ margin-bottom: 2rem;
173
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
174
+ }
175
+ .input-section {
176
+ background: #1e293b !important;
177
+ padding: 1.5rem;
178
+ border-radius: 12px;
179
+ border: 1px solid #334155;
180
+ margin-bottom: 1rem;
181
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
182
+ }
183
+ .results-section {
184
+ background: #1e293b !important;
185
+ border-radius: 12px;
186
+ border: 1px solid #334155;
187
+ overflow: hidden;
188
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
189
+ }
190
+ .summary-card {
191
+ background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%);
192
+ color: #ffffff !important;
193
+ padding: 1.5rem;
194
+ border-radius: 12px;
195
+ margin-bottom: 1rem;
196
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
197
+ }
198
+ .tabs {
199
+ background: #1e293b !important;
200
+ border-radius: 12px;
201
+ padding: 1rem;
202
+ }
203
+ .tab-nav {
204
+ border-bottom: 1px solid #334155 !important;
205
+ }
206
+ .tab-nav button {
207
+ color: #94a3b8 !important;
208
+ }
209
+ .tab-nav button.selected {
210
+ color: #4f46e5 !important;
211
+ border-bottom: 2px solid #4f46e5 !important;
212
+ }
213
+ .tab-content {
214
+ background: #1e293b !important;
215
+ color: #e2e8f0 !important;
216
+ padding: 1rem;
217
+ }
218
+ .markdown {
219
+ color: #e2e8f0 !important;
220
+ }
221
+ .markdown h1, .markdown h2, .markdown h3 {
222
+ color: #ffffff !important;
223
+ }
224
+ .markdown strong {
225
+ color: #4f46e5 !important;
226
+ }
227
+ .markdown ul li {
228
+ color: #e2e8f0 !important;
229
+ }
230
+ .accordion {
231
+ background: #1e293b !important;
232
+ border: 1px solid #334155 !important;
233
+ }
234
+ .accordion-title {
235
+ color: #e2e8f0 !important;
236
+ }
237
+ .accordion-content {
238
+ color: #94a3b8 !important;
239
+ }
240
+ .button-primary {
241
+ background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%) !important;
242
+ color: white !important;
243
+ border: none !important;
244
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1) !important;
245
+ }
246
+ .button-primary:hover {
247
+ background: linear-gradient(135deg, #4338ca 0%, #6d28d9 100%) !important;
248
+ transform: translateY(-1px);
249
+ box-shadow: 0 6px 8px -1px rgb(0 0 0 / 0.1) !important;
250
+ }
251
+ """
252
+ )
253
 
254
  with app:
255
+ gr.Markdown(
256
+ """
257
+ <div class="main-header">
258
+ <h1>πŸ€– Skills Gap Advisor</h1>
259
+ <p>AI-Powered Resume Analysis & Career Guidance</p>
260
+ <p><em>Powered by configurable LLM providers β€’ Available as MCP Tool</em></p>
261
+ </div>
262
+ """,
263
+ elem_classes=["main-header"]
264
+ )
265
 
266
  with gr.Row():
267
  # ── Sidebar for inputs ─────────────────────────
268
+ with gr.Column(scale=1, elem_classes=["input-section"]):
269
+ gr.Markdown("### πŸ“ Job & Resume Information")
270
+ title_input = gr.Textbox(
271
+ label="🏷️ Job Title",
272
+ placeholder="e.g. Senior Data Scientist",
273
+ info="Enter the exact job title you're applying for"
274
+ )
275
+ job_input = gr.Textbox(
276
+ label="πŸ“‹ Job Description",
277
+ lines=6,
278
+ placeholder="Copy-paste the complete job description here...",
279
+ info="Include requirements, responsibilities, and qualifications"
280
+ )
281
+ resume_input = gr.File(
282
+ label="πŸ“„ Resume Upload",
283
+ file_types=[".pdf", ".txt"]
284
+ )
285
+
286
+ with gr.Accordion("βš™οΈ Advanced Settings", open=False):
287
+ gr.Markdown("**LLM Configuration** (via environment variables)")
288
+ gr.Markdown(
289
+ """
290
+ - `LLM_PROVIDER`: openai, gemini, deepseek
291
+ - `LLM_MODEL`: Model name (e.g., gpt-4o-mini, gemini-2.0-flash, deepseek-chat)
292
+ - API keys: `OPENAI_API_KEY`, `GEMINI_API_KEY`, `DEEPSEEK_API_KEY`
293
+ """
294
+ )
295
+
296
+ run_btn = gr.Button(
297
+ "πŸ” Analyze Resume",
298
+ variant="primary",
299
+ size="lg",
300
+ scale=1,
301
+ elem_classes=["button-primary"]
302
+ )
303
 
304
  # ── Main area for results ───────────────────────
305
+ with gr.Column(scale=2, elem_classes=["results-section"]):
306
+ gr.Markdown("### πŸ“Š Analysis Results")
307
+
308
+ summary_display = gr.Markdown(
309
+ elem_classes=["summary-card"],
310
+ value="Upload a resume and job description to see the analysis results here."
311
+ )
312
+
313
+ with gr.Tabs(elem_classes=["tabs"]):
314
+ with gr.TabItem("πŸ“‹ Overview", elem_id="overview-tab"):
315
+ overview_display = gr.Markdown(
316
+ value="Detailed evaluation summary will appear here after analysis."
317
+ )
318
+ with gr.TabItem("πŸ’ͺ Strengths", elem_id="strengths-tab"):
319
+ strengths_display = gr.Markdown(
320
+ value="Your matching skills and key strengths will be highlighted here."
321
+ )
322
+ with gr.TabItem("🎯 Gaps", elem_id="gaps-tab"):
323
+ gaps_display = gr.Markdown(
324
+ value="Missing skills and improvement areas will be identified here."
325
+ )
326
+ with gr.TabItem("πŸ’‘ Recommendations", elem_id="recommendations-tab"):
327
+ recs_display = gr.Markdown(
328
+ value="Personalized recommendations for improving your application will appear here."
329
+ )
330
 
331
  # Wire up the callback
332
  run_btn.click(
 
346
  app.launch(
347
  server_name="0.0.0.0",
348
  server_port=7860,
349
+ debug=True,
350
+ mcp_server=True
351
+ )
llm_client.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from openai import OpenAI
3
+
4
+ class LLMClient:
5
+ def __init__(self, provider: str = "openai", model: str = "gpt-4o-mini"):
6
+ self.provider = provider
7
+ self.model = model
8
+ self.client = self._initialize_client()
9
+
10
+ def _initialize_client(self):
11
+ if self.provider == "openai":
12
+ return OpenAI()
13
+ elif self.provider == "gemini":
14
+ return OpenAI(
15
+ api_key=os.getenv("GEMINI_API_KEY"),
16
+ base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
17
+ )
18
+ elif self.provider == "deepseek":
19
+ return OpenAI(
20
+ api_key=os.getenv("DEEPSEEK_API_KEY"),
21
+ base_url="https://api.deepseek.com/v1"
22
+ )
23
+ # Add other providers here later
24
+ else:
25
+ raise ValueError(f"Unsupported LLM provider: {self.provider}")
26
+
27
+ def chat_completion(self, messages: list):
28
+ if self.provider == "openai" or self.provider == "gemini" or self.provider == "deepseek":
29
+ return self.client.chat.completions.create(
30
+ model=self.model,
31
+ messages=messages
32
+ )
33
+ # Add other providers' chat completion logic here later
34
+ else:
35
+ raise ValueError(f"Unsupported LLM provider: {self.provider}")
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- gradio
2
  openai
3
  pypdf
4
  python-dotenv
 
1
+ gradio[mcp]
2
  openai
3
  pypdf
4
  python-dotenv