import argparse
import json

import openai

from openai_function_utils.openai_function_interface import (
    OPENAI_FUNCTIONS_DEFINITIONS,
    OPENAI_AVAILABLE_FUNCTIONS,
)
from utils import get_embeddings, search_document_annoy, transform_user_question, debug_print


def truncate_input_text(input_text, question, max_length=7000):
    """Build the prompt, truncating the retrieved document so the whole
    prompt stays within max_length characters."""
    prompt_template = "Based on the input text: {}\nGive me answers for this question: {}"
    # Length remaining for the document after the question and the template text
    available_length_for_input = max_length - len(question) - len(prompt_template.format("", ""))
    truncated_input_text = input_text[:available_length_for_input]
    return prompt_template.format(truncated_input_text, question)


def answer_with_gpt3_with_function_calls(input_text, question, model):
    question = truncate_input_text(input_text, question)
    messages = [
        {
            "role": "system",
            "content": (
                "You are a professional, knowledgeable, supportive, friendly but not "
                "overly casual assistant who will help the user to answer questions "
                "about a lab. In order to do so, you may use semantic_search to find "
                "relevant documents."
            ),
        },
        {"role": "user", "content": question},
    ]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        functions=OPENAI_FUNCTIONS_DEFINITIONS,
        max_tokens=200,
    )
    response_message = response["choices"][0]["message"]

    # Check whether the model wanted to call a function
    if response_message.get("function_call"):
        # Note: the JSON arguments may not always be valid; be sure to handle errors
        available_functions = OPENAI_AVAILABLE_FUNCTIONS  # only one function in this example, but you can have multiple
        function_name = response_message["function_call"]["name"]
        function_to_call = available_functions[function_name]
        function_args = json.loads(response_message["function_call"]["arguments"])
        function_response = function_to_call(**function_args)

        messages.append(response_message)  # extend conversation with the assistant's reply
        messages.append(
            {
                "role": "function",
                "name": function_name,
                "content": function_response,
            }
        )  # extend conversation with the function response
        # Get a new response from the model, which can now see the function output
        second_response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
        )
        return second_response.choices[0].message.content
    else:
        return response.choices[0].message.content


# TODO: add an input parameter for api_key, needed for the demo
def get_response_from_model(user_input, top_k=3, annoy_metric='dot',
                            model_name="gpt-3.5-turbo", user_query_preprocess=False):
    assert top_k > 0, 'top_k must be an integer greater than 0'
    if user_query_preprocess:
        chatgpt_question = transform_user_question(user_input, model_name)
    else:
        chatgpt_question = user_input
    debug_print("chatgpt_question: ", chatgpt_question)
    try:
        user_q_embedding = get_embeddings(chatgpt_question)
        document = search_document_annoy(user_q_embedding, top_k=top_k, metric=annoy_metric)
        reply = answer_with_gpt3_with_function_calls(document, user_input, model_name)
        print(f"returning reply: {reply}")
        return reply
    except Exception as e:
        print(f"returning error: {e}")
        return str(e)
        # Alternatively, return a fixed message, e.g.:
        # return ("Error when trying to get embedding for the user query. "
        #         "Please try with a shorter question.")
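

# A minimal CLI entry point (a sketch, not part of the original module): it wires
# the api_key parameter mentioned in the TODO above into the legacy (pre-1.0)
# openai client via openai.api_key and runs a single query. The argument names
# below are assumptions for illustration.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Query the lab QA assistant.")
    parser.add_argument("question", help="Question to ask about the lab")
    parser.add_argument("--api_key", required=True, help="OpenAI API key for the demo")
    parser.add_argument("--top_k", type=int, default=3, help="Number of documents to retrieve")
    args = parser.parse_args()

    openai.api_key = args.api_key  # legacy openai client configuration
    get_response_from_model(args.question, top_k=args.top_k)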