File size: 3,438 Bytes
be0ac47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
from openai import OpenAI
from tools import *

# setup the api keys
# OpenAI client; the SDK reads OPENAI_API_KEY from the environment by default.
OPENAI_CLIENT = OpenAI()
# aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
# aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY")

# Mutable module-level state shared by every handler in this file.
App_state = {
    # (user_text, assistant_text) pairs displayed in the chat UI.
    "messages":[],
    # Full OpenAI-format conversation history sent on each completion call.
    # NOTE(review): the greeting is stored with role "system" — presumably
    # intentional as a combined system prompt/greeting; confirm it shouldn't
    # be an "assistant" message instead.
    "sys_msg":[{
        "role": "system",
        "content": "Hi, I'm a Factory supply chain assistant! Ask me a question."
    }],
    # UI language code (default English).
    "language":"en",
    # TTS voice name passed to the speech endpoint in text_to_audio().
    "voice":"nova",
}

# def sys_msg_to_chat():
#     messages = []
#     # search for each user and assistant message pair and return them as a list of tuples
#     length = len(App_state["sys_msg"])
#     for i,msg in enumerate(App_state["sys_msg"]):

#         if msg["role"] == "user":
#             for j in range(i+1,length):
#                 if App_state["sys_msg"][j]["role"] == "assistant":
#                     messages.append((App_state["sys_msg"][i]["content"],App_state["sys_msg"][j]["content"]))
#                     break

#     App_state["messages"] = messages

def send_chat(text, messages=None):
    """Record the user's message, run the assistant, and return the chat log.

    Parameters
    ----------
    text : str
        The user's input text.
    messages : list | None, optional
        Accepted only for backward compatibility with the chat-UI callback
        signature; it is not read — the function uses the shared App_state.
        (Was a mutable default ``[]``, which is shared across calls; replaced
        with ``None`` to avoid the mutable-default-argument pitfall.)

    Returns
    -------
    tuple[str, list]
        An empty string (clears the UI input box) and the updated list of
        (user, assistant) message pairs from App_state["messages"].
    """
    App_state["sys_msg"].append({"role": "user", "content": text})

    # run() appends the assistant's reply (and any tool exchanges) to App_state.
    run(text)

    return "", App_state["messages"]

def run(text, depth=0, max_depth=5):
    """Run one chat-completion turn, resolving tool calls recursively.

    Parameters
    ----------
    text : str
        The original user input (used to pair the final answer in the UI log).
    depth : int
        Current recursion depth; incremented once per tool-call round-trip.
    max_depth : int
        Maximum tool-call rounds before giving up.

    Returns
    -------
    str | None
        The assistant's final text reply, or the apology string when
        max_depth is exceeded.
    """
    if depth >= max_depth:
        apology = "I'm sorry, the inquiry is too complex for me to handle. Please try again with a simpler question."
        # Bug fix: this message was previously only returned, but send_chat()
        # discards the return value — so the user never saw it. Record it in
        # the visible chat history as well.
        App_state["messages"].append((text, apology))
        return apology

    res = OPENAI_CLIENT.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=App_state["sys_msg"],
            tools=TOOL_SCHEMA,
            tool_choice="auto"
        )
    message = res.choices[0].message

    App_state["sys_msg"].append(message.model_dump())

    if message.tool_calls:
        # Bug fix: answer EVERY tool call, not just the first — the API
        # rejects the follow-up request if any tool_call_id is unanswered.
        for tool_call in message.tool_calls:
            App_state["sys_msg"].append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "name": tool_call.function.name,
                "content": str(run_tool(tool_call)),
            })
        # Bug fix: propagate the recursive result instead of discarding it.
        return run(text, depth + 1, max_depth)

    App_state["messages"].append((text, message.content))
    return message.content

def run_tool(tool_call):
    """Execute the tool named in *tool_call* and return its raw result.

    The tool's arguments arrive JSON-encoded on the tool call; they are
    decoded and, when non-empty, passed to the tool callable as a single
    dict argument. An empty/falsy argument payload means a no-arg call.
    """
    call_spec = tool_call.function.model_dump()
    name = call_spec.get("name")
    args = json.loads(call_spec.get("arguments"))
    return TOOLS[name](args) if args else TOOLS[name]()

def translate(file_path):
    """Translate the audio file at *file_path* to English text via Whisper.

    Parameters
    ----------
    file_path : str | None
        Path to an audio file; any falsy value yields "".

    Returns
    -------
    str
        The translated transcript, or "" when no path was given.
    """
    if not file_path:
        return ""
    # Bug fix: the file handle was previously opened and never closed; a
    # context manager releases it even if the API call raises.
    with open(file_path, "rb") as audio_file:
        res = OPENAI_CLIENT.audio.translations.create(
            file=audio_file,
            model="whisper-1")
    return res.text
    
def text_to_audio(chat_messages):
    """Synthesize speech for the most recent assistant reply.

    *chat_messages* is a list of (user, assistant) text pairs; the last
    element of the last pair is spoken using the currently selected voice
    from App_state.

    Returns the raw audio bytes produced by the TTS endpoint.
    """
    latest_reply = chat_messages[-1][-1]

    speech = OPENAI_CLIENT.audio.speech.create(
        model="tts-1",
        voice=App_state["voice"],
        input=latest_reply,
    )

    return speech.content