# Source: menduChat / joy.py (GitHub scrape header — author "lsacy",
# branch "test", commit 121a1b0). Not Python code; kept as a comment so
# the file parses.
import os
from dotenv import load_dotenv
load_dotenv()
import openai
# Prefer the key from the environment (populated by load_dotenv above).
openai.api_key = os.getenv('OPENAI_API_KEY')
# Fall back to the key file only when no env key is present: in openai v0.x,
# a set api_key_path takes precedence over api_key, so setting it
# unconditionally would make the env-loaded key (and load_dotenv) dead code.
if openai.api_key is None:
    openai.api_key_path = './openai_api_key.txt'
completion = openai.Completion()
# start_chat_log = ('[Instruction] The following is a conversation with the AI therapist named Joy and a patient. '
# 'Joy is compassionate, insightful, and empathetic. She offers advice for coping with the user\'s problem. '
# 'Her objective is to make the user feel better by feeling heard. '
# 'Sometimes the user will want to end the conversation, and Joy will respect that.')
# Initial system instruction that seeds every conversation.
# (Typos fixed: "compasionate" -> "compassionate", "advices once a while"
# -> "advice once in a while" — this text is sent to the model verbatim.)
chat_log = ('[Instruction] Act as a friendly, compassionate, insightful, and '
            'empathetic AI therapist named Joy. Joy listens, asks for details '
            'and offers detailed advice once in a while. End the conversation '
            'if the patient wishes to.')
# Marker appended before the model's reply in the running transcript.
start_sequence = "\nJoy:"
# Marker appended before each new patient message in the running transcript.
restart_sequence = "\n\nPatient:"
# todo: add a function to check if the user wants to end the conversation
# let the user know that they can end the conversation by typing "end"
# let the user choose between models (curie, davinci, curie-finetuned, davinci-finetuned)
# let the user choose between different temperatures, frequency_penalty, presence_penalty
# embed the user and look for the most similar user in the database
# embed the user's input and look for the most similar user's input in the database
# embed the user's input and look for the most similar user's response in the database
# embed the user's input and look for therapy catalogue that is similar to the user's input
# push the therapy catalogue to the user
def ask(question: str, chat_log: str) -> 'tuple[str, str]':
    """Send one patient message to the model and return Joy's reply.

    Parameters
    ----------
    question : str
        The patient's latest message.
    chat_log : str
        The running transcript so far (instruction + prior turns).

    Returns
    -------
    tuple[str, str]
        ``(answer, chat_log)``: the model's reply and the transcript
        extended with this turn, to be passed back on the next call.
    """
    # Append the new patient turn and cue the model to answer as Joy.
    prompt = f'{chat_log}{restart_sequence} {question}{start_sequence}'
    response = completion.create(
        prompt=prompt,
        model="text-davinci-003",
        # Stop before the model starts writing the next turn itself.
        stop=["Patient:", 'Joy:'],
        temperature=0.6,        # higher -> more creative
        frequency_penalty=0.3,  # discourages word repetition; larger -> stronger
        presence_penalty=0.6,   # discourages topic repetition; larger -> stronger
        top_p=1,
        best_of=1,
        max_tokens=170,
    )
    answer = response.choices[0].text.strip()
    # Extend the transcript so the next call sees the whole conversation.
    chat_log = f'{prompt}{answer}'
    return answer, chat_log