'''Functions for summarizing and formatting job calls.'''

import os
import logging

from openai import OpenAI

from configuration import JOB_CALL_EXTRACTION_PROMPT

# pylint: disable=broad-exception-caught

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def summarize_job_call(job_call: str) -> str:
    '''Extracts and summarizes key information from a job call.
    Returns None if the Modal API call fails.'''

    # Client for the OpenAI-compatible vLLM summarization endpoint on Modal
    client = OpenAI(api_key=os.environ['MODAL_API_KEY'])
    client.base_url = (
        'https://gperdrizet--vllm-openai-compatible-summarization-serve.modal.run/v1'
    )

    # Default to first available model
    model = client.models.list().data[0]
    model_id = model.id

    # Prepend the extraction prompt to the raw job call text
    messages = [
        {
            'role': 'system',
            'content': f'{JOB_CALL_EXTRACTION_PROMPT}{job_call}'
        }
    ]

    completion_args = {
        'model': model_id,
        'messages': messages,

        # Optional sampling parameters, left at the server defaults:
        # "frequency_penalty": args.frequency_penalty,
        # "max_tokens": 128,
        # "n": args.n,
        # "presence_penalty": args.presence_penalty,
        # "seed": args.seed,
        # "stop": args.stop,
        # "stream": args.stream,
        # "temperature": args.temperature,
        # "top_p": args.top_p,
    }

    try:
        response = client.chat.completions.create(**completion_args)

    except Exception as e:
        response = None
        logger.error('Error during Modal API call: %s', e)

    if response is not None:
        summary = response.choices[0].message.content

    else:
        summary = None

    return summary
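
# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes MODAL_API_KEY is set in the environment and that the Modal
# endpoint configured above is reachable; the sample job call text below
# is hypothetical.
if __name__ == '__main__':

    SAMPLE_JOB_CALL = (
        'Senior Python developer wanted: build REST APIs, maintain CI '
        'pipelines, and mentor junior engineers. Remote, full-time.'
    )

    result = summarize_job_call(SAMPLE_JOB_CALL)

    if result is not None:
        logger.info('Job call summary: %s', result)

    else:
        logger.error('Summarization failed, no summary returned')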