from openai import OpenAI


def gpt_three(data):
    """
    Blocking chat-completion wrapper.

    Args:
        data: dict of the form
            {
                "message": [...],   # flat list of strings:
                                    # index 0 = system prompt, then
                                    # alternating user / assistant turns
                "config": {
                    "temperature": float,  # 0-2, default 1
                    "max_token": int,      # 0-1024, default 512
                }
            }
            NOTE: data["message"] is mutated in place — each string is
            wrapped into a {"role": ..., "content": ...} dict.

    Returns:
        str: the assistant reply text of the first completion choice.
    """
    # Wrap the flat string list into OpenAI chat-message dicts.
    # Index 0 is the system prompt; odd indices are user turns;
    # remaining even indices are assistant turns.
    for i in range(len(data["message"])):
        if i == 0:
            role = "system"
        elif i % 2 != 0:
            role = "user"
        else:
            role = "assistant"
        data["message"][i] = {"role": role, "content": data["message"][i]}

    config = data.get("config", {})

    # Honor the documented defaults even when the keys are missing
    # (the original raised KeyError instead of falling back).
    max_token = config.get("max_token", 512)
    if not (0 <= max_token <= 1024):
        max_token = 512

    temperature = config.get("temperature", 1)
    if not (0 <= temperature <= 2):
        temperature = 1

    client = OpenAI()
    completion = client.chat.completions.create(
        # NOTE(review): the original docstring said "gpt3.5" but the
        # model here is gpt-4o — kept as-is; confirm which is intended.
        model="gpt-4o",
        max_tokens=max_token,
        temperature=temperature,
        # If a system prompt exists it is at index 0 of the list.
        messages=data["message"],
    )

    return completion.choices[0].message.content


def gpt_three_stream(data):
    """
    Streaming chat-completion wrapper (generator).

    Args:
        data: same shape as gpt_three — {"message": [str, ...],
            "config": {"temperature": 0-2, "max_token": 0-1024}}.
            NOTE: data["message"] is mutated in place.

    Yields:
        str: incremental content fragments of the assistant reply.
    """
    # Wrap the flat string list into OpenAI chat-message dicts:
    # index 0 = system, odd = user, remaining even = assistant.
    for i in range(len(data["message"])):
        if i == 0:
            role = "system"
        elif i % 2 != 0:
            role = "user"
        else:
            role = "assistant"
        data["message"][i] = {"role": role, "content": data["message"][i]}

    config = data.get("config", {})

    # Fall back to the documented defaults when keys are missing or
    # out of range (the original raised KeyError on a missing key).
    max_token = config.get("max_token", 512)
    if not (0 <= max_token <= 1024):
        max_token = 512

    temperature = config.get("temperature", 1)
    if not (0 <= temperature <= 2):
        temperature = 1

    client = OpenAI()
    completion = client.chat.completions.create(
        model='gpt-3.5-turbo',
        max_tokens=max_token,
        temperature=temperature,
        # If a system prompt exists it is at index 0 of the list.
        messages=data["message"],
        stream=True  # stream fragments back instead of one reply
    )

    for chunk in completion:
        content = chunk.choices[0].delta.content
        # The final streamed chunk carries delta.content == None;
        # the original yielded that None to the consumer. Skip it.
        if content is not None:
            yield content

        # print(chunk)
        # print(chunk.choices[0].delta.content)
        # print("****************")


if __name__ == "__main__":
    import os

    # store openai-api-key to internal storage
    os.environ["OPENAI_API_KEY"] = "sk-proj-wOXa1EvcrzPxGvVwjbp3T3BlbkFJSZNHhSdxIjtB02yFquNW"

    # solve proxy problem
    os.environ["http_proxy"] = "http://localhost:7890"
    os.environ["https_proxy"] = "http://localhost:7890"

    data1 = {
        "message": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"}
        ],
        "config": {
            "temperature": 1,
            "max_token": 512
        }
    }

    # response = chat_gpt3(data1)

    # print(response)
    # while True:
    #     response = stream_chat_gpt3(data1)
    #     print(response)

    # stream_chat_gpt_three(data1)
