'''Functions for summarizing and formatting job calls.'''

import os
import logging
from typing import Optional

from openai import OpenAI

from configuration import JOB_CALL_EXTRACTION_PROMPT

# pylint: disable=broad-exception-caught

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def summarize_job_call(job_call: str) -> Optional[str]:
    '''Extracts and summarizes key information from a job call.

    Sends the job call text, prefixed with JOB_CALL_EXTRACTION_PROMPT, to a
    Modal-hosted OpenAI-compatible vLLM endpoint and returns the model's
    summary.

    Args:
        job_call: Raw job call text to summarize.

    Returns:
        The model-generated summary string, or None if the API call failed.
    '''

    client = OpenAI(
        api_key=os.environ['MODAL_API_KEY'],
        base_url=(
            'https://gperdrizet--vllm-openai-compatible-summarization-serve.modal.run/v1'
        ),
    )

    # Default to first available model served by the endpoint
    model = client.models.list().data[0]
    model_id = model.id

    messages = [
        {
            'role': 'system',
            'content': f'{JOB_CALL_EXTRACTION_PROMPT}{job_call}'
        }
    ]

    try:
        response = client.chat.completions.create(
            model=model_id,
            messages=messages,
        )

    except Exception:
        # Best-effort: log the full traceback and signal failure with None
        # rather than propagating transient network/API errors to the caller.
        logger.exception('Error during Modal API call')
        return None

    return response.choices[0].message.content