# JobGPT / llm.py
from langchain.chains import ConversationChain
from langchain.prompts import PromptTemplate


def generate_prompt(input_variables: list, template_file: str):
    """
    Generate a prompt from a template file and a list of input variables
    """
    with open(template_file, 'r', encoding='utf-8') as source_file:
        template = source_file.read()
    prompt = PromptTemplate(template=template, input_variables=input_variables)
    return prompt
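

# A minimal sketch of what a template file loaded by generate_prompt might
# contain. The actual JobGPT template is not shown in this file, so the wording
# and file name below are illustrative only; PromptTemplate fills in
# {placeholders} matching the names passed as input_variables, and
# ConversationChain expects the memory key (by default 'history') plus 'input'.
#
#     The following is a conversation between a helpful job-search assistant
#     and a user.
#     Current conversation:
#     {history}
#     Human: {input}
#     AI: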


def generate_conversation(memory: object,
                          llm: object,
                          prompt: object,
                          verbose: bool = False):
    """
    Generate a conversation from a memory object, a language model object, and a prompt object
    """
    conversation = ConversationChain(memory=memory,
                                     llm=llm,
                                     prompt=prompt,
                                     verbose=verbose)
    return conversation


def predict(input_text: str, conversation: object):
    """
    Predict the next response from the conversation object
    """
    response = conversation(input_text)
    history = response['history']
    history = history.split('\n')
    prediction = response['response']
    return {'history': history, 'prediction': prediction}
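

# Minimal usage sketch (an assumption, not part of the original file): the
# template path 'prompts/conversation.txt', the model choice, and the example
# question are illustrative. It wires the three helpers together with
# ConversationBufferMemory, whose default memory key 'history' matches the key
# read back in predict().
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    from langchain.memory import ConversationBufferMemory

    # Requires OPENAI_API_KEY in the environment and a template file whose
    # placeholders are exactly {history} and {input}.
    prompt = generate_prompt(["history", "input"], "prompts/conversation.txt")
    llm = ChatOpenAI(temperature=0.7)
    memory = ConversationBufferMemory()
    conversation = generate_conversation(memory, llm, prompt, verbose=True)

    result = predict("Help me improve my resume summary.", conversation)
    print(result["prediction"])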