"""Helpers for answering questions about a lab: embeds the user question,
retrieves relevant documents with Annoy, and answers via OpenAI function calling."""

import argparse
import json

import openai

from openai_function_utils.openai_function_interface import OPENAI_FUNCTIONS_DEFINITIONS, OPENAI_AVAILABLE_FUNCTIONS
from utils import get_embeddings, search_document_annoy, transform_user_question, debug_print
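
# Assumed shape, inferred from usage below: OPENAI_FUNCTIONS_DEFINITIONS is a
# list of JSON-schema function specs passed to the chat API, and
# OPENAI_AVAILABLE_FUNCTIONS maps each function name to its Python callable,
# e.g. {"semantic_search": <callable>}.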


def truncate_input_text(input_text, question, max_length=7000):
    """Build the final prompt, truncating the retrieved text so the whole
    prompt stays within a rough character budget of max_length (characters,
    not tokens)."""
    available_length_for_input = max_length - len(question) - len(
        "Based on the input text: \n Give me answers for this question: ")
    truncated_input_text = input_text[:available_length_for_input]
    tmp_question = f"Based on the input text: {truncated_input_text}\nGive me answers for this question: {question}"
    return tmp_question


def answer_with_gpt3_with_function_calls(input_text, question, model):
    """Ask the chat model to answer `question`, letting it call one of the
    registered functions (e.g. semantic_search) if it decides to."""
    question = truncate_input_text(input_text, question)

    messages = [
        {
            "role": "system",
            "content": "".join([
                "You are a professional, knowledgeable, supportive, friendly but not overly casual assistant who will help the user to answer questions about a lab. ",
                "In order to do so, you may use semantic_search to find relevant documents. ",
            ])
        },
        {
            "role": "user",
            "content": question
        }
    ]

    # Note: openai.ChatCompletion with the `functions` argument is the legacy
    # pre-1.0 OpenAI Python SDK interface (openai<1.0).
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        functions=OPENAI_FUNCTIONS_DEFINITIONS,
        max_tokens=200
    )
    response_message = response["choices"][0]["message"]

    # Record the assistant turn (which may contain a function_call request)
    # so any follow-up request sees the full conversation.
    messages.append(
        {
            "role": "assistant",
            "content": response_message.get("content"),
            "function_call": response_message.get("function_call"),
        }
    )

    if response_message.get("function_call"):
        # The model requested a function call: look up the callable, parse the
        # JSON-encoded arguments, and invoke it. The assistant turn was already
        # appended above, so response_message is not appended a second time.
        available_functions = OPENAI_AVAILABLE_FUNCTIONS
        function_name = response_message["function_call"]["name"]
        function_to_call = available_functions[function_name]
        function_args = json.loads(response_message["function_call"]["arguments"])
        function_response = function_to_call(**function_args)
        messages.append(
            {
                "role": "function",
                "name": function_name,
                "content": function_response,
            }
        )
        second_response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
        )
        return second_response.choices[0].message.content
    else:
        return response.choices[0].message.content


def get_response_from_model(user_input, top_k=3, annoy_metric='dot', model_name="gpt-3.5-turbo", user_query_preprocess=False):
    """Embed the user question, retrieve the top_k most relevant documents
    with Annoy, and answer with a function-calling chat completion."""
    assert top_k > 0, 'top_k must be an integer greater than 0'

    if user_query_preprocess:
        chatgpt_question = transform_user_question(user_input, model_name)
    else:
        chatgpt_question = user_input
    debug_print("chatgpt_question: ", chatgpt_question)

    try:
        user_q_embedding = get_embeddings(chatgpt_question)
        document = search_document_annoy(user_q_embedding, top_k=top_k, metric=annoy_metric)
        reply = answer_with_gpt3_with_function_calls(document, user_input, model_name)
        print(f"returning reply: {reply}")
        return reply
    except Exception as e:
        print(f"returning error: {e}")
        return str(e)
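

# A minimal CLI entry point sketch using the argparse import above. The flag
# names and defaults are assumptions (they mirror get_response_from_model's
# parameters), not taken from an original entry point.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Answer a question about the lab using semantic search."
    )
    parser.add_argument("question", help="the user question to answer")
    parser.add_argument("--top-k", type=int, default=3, help="documents to retrieve")
    parser.add_argument("--annoy-metric", default="dot", help="Annoy distance metric")
    parser.add_argument("--model-name", default="gpt-3.5-turbo", help="OpenAI chat model")
    parser.add_argument("--preprocess", action="store_true",
                        help="rewrite the question before embedding it")
    args = parser.parse_args()
    get_response_from_model(
        args.question,
        top_k=args.top_k,
        annoy_metric=args.annoy_metric,
        model_name=args.model_name,
        user_query_preprocess=args.preprocess,
    )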