gperdrizet commited on
Commit
cbec091
·
1 Parent(s): 1755ab9

Started work on job call context extraction

Browse files
Files changed (3) hide show
  1. configuration.py +18 -0
  2. functions/gradio.py +4 -0
  3. functions/job_call.py +54 -0
configuration.py CHANGED
@@ -17,4 +17,22 @@ You will receive structured text extracted from a LinkedIn profile PDF and GitHu
17
  - Education
18
 
19
  Format the resume using Markdown syntax, ensuring that it is easy to read and visually appealing. Use appropriate headings, bullet points, and formatting to enhance clarity and presentation.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  """
 
17
  - Education
18
 
19
  Format the resume using Markdown syntax, ensuring that it is easy to read and visually appealing. Use appropriate headings, bullet points, and formatting to enhance clarity and presentation.
20
+ """
21
+
22
# Prompt used to distill a raw LinkedIn job posting into structured context
# that the resume-writing agent can consume. The job-call text is appended
# directly after this prompt at call time.
# NOTE(review): the example asks for a "JSON string" but shows single-quoted
# keys/values, which is not valid JSON — confirm downstream parsing accepts
# this before tightening the wording.
JOB_CALL_EXTRACTION_PROMPT = """
The following text is a job description from a LinkedIn job call. Please summarize and format it so that it can be used as context for an AI agent to use when writing a resume that is tailored to this specific job.
Format your output as a JSON string as follows:
{
    'Job title': 'Name of position',
    'Job description': 'Summary job description and company',
    'Key skills': 'List of skills from job post',
    'Tools/technologies': 'List of any tools or technologies mentioned in the job post',
    'Experience level': 'Description of the experience level required for the job (e.g., entry-level, mid-level, senior)',
    'Education requirements': 'Description of the education requirements for the job (e.g., degree, certifications)',
}

Here is the job call to extract the information from:

JOB CALL

"""
functions/gradio.py CHANGED
@@ -7,6 +7,7 @@ Functions for handling Gradio UI interactions and processing user inputs.
7
  import logging
8
  from functions.linkedin_resume import extract_text_from_linkedin_pdf
9
  from functions.github import get_github_repositories
 
10
  from functions.writer_agent import write_resume
11
 
12
  # Set up logging
@@ -76,6 +77,9 @@ def process_inputs(linkedin_pdf, github_url, job_post_text, user_instructions):
76
  if job_post_text and job_post_text.strip():
77
  result += "βœ… Job post text provided\n"
78
  logger.info(f"Job post text provided ({len(job_post_text)} characters)")
 
 
 
79
  else:
80
  result += "❌ Job post not provided\n"
81
  logger.info("No job post text provided")
 
7
  import logging
8
  from functions.linkedin_resume import extract_text_from_linkedin_pdf
9
  from functions.github import get_github_repositories
10
+ from functions.job_call import summarize_job_call
11
  from functions.writer_agent import write_resume
12
 
13
  # Set up logging
 
77
  if job_post_text and job_post_text.strip():
78
  result += "βœ… Job post text provided\n"
79
  logger.info(f"Job post text provided ({len(job_post_text)} characters)")
80
+ summary = summarize_job_call(job_post_text)
81
+ result += " βœ… Job post summary generated\n"
82
+ result += summary
83
  else:
84
  result += "❌ Job post not provided\n"
85
  logger.info("No job post text provided")
functions/job_call.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
'''Functions for summarizing and formatting job calls.'''

import logging
import os
from typing import Optional

from openai import OpenAI

from configuration import JOB_CALL_EXTRACTION_PROMPT

# Module-level logger (bug fix: `logger` was used below but never defined)
logger = logging.getLogger(__name__)


def summarize_job_call(job_call: str) -> Optional[str]:
    '''Extracts and summarizes key information from a job call.

    Sends the raw job-posting text to the Modal-hosted, OpenAI-compatible
    summarization endpoint, prefixed with JOB_CALL_EXTRACTION_PROMPT.

    Args:
        job_call: Raw text of the job posting to summarize.
            (Bug fix: was annotated `dict`, but the caller passes the
            job-post text string.)

    Returns:
        The model-generated summary string, or None if the API call failed.
    '''

    # Requires MODAL_API_KEY in the environment; raises KeyError if absent.
    client = OpenAI(api_key=os.environ['MODAL_API_KEY'])

    client.base_url = (
        'https://gperdrizet--vllm-openai-compatible-summarization-serve.modal.run/v1'
    )

    # Default to the first model the endpoint advertises
    model_id = client.models.list().data[0].id

    messages = [
        {
            'role': 'system',
            # Bug fix: original interpolated an undefined name `content`;
            # the job-call text itself is the context to append.
            'content': f'{JOB_CALL_EXTRACTION_PROMPT}{job_call}'
        }
    ]

    try:
        response = client.chat.completions.create(
            model=model_id,
            messages=messages,
        )

    except Exception as e:  # pylint: disable=broad-exception-caught
        # Best-effort: log and signal failure to the caller with None
        logger.error('Error during Modal API call: %s', e)
        return None

    return response.choices[0].message.content