# -*- coding: utf-8 -*-
# @Time    : 2024/9/12 10:21
# @Author  : yujiahao
# @File    : OpenAiBot.py
# @description: 调用OpenAI接口，简单聊天机器人

"""
OpenAI 免费 API Key 获取参考：https://github.com/chatanywhere/GPT_API_free.git
通义千问开发文档：https://help.aliyun.com/zh/model-studio/getting-started/?spm=a2c4g.11186623.0.0.550419a11NCFDK
OpenAI开发文档：https://platform.openai.com/docs/quickstart?language-preference=python&quickstart-example=images
"""
import os

from openai import OpenAI

# API credentials and endpoint configuration.
# NOTE(review): the key used to be hard-coded in source; read it from the
# environment instead so real keys never land in version control.  The "XXX"
# placeholder is kept as the fallback for backward compatibility.
api_key = os.getenv("OPENAI_API_KEY", "XXX")
base_url = "https://api.chatanywhere.tech/v1"  # CN relay, lower latency


# base_url = "https://api.chatanywhere.org/v1"  # for use outside China

def create_client(api_key, base_url):
    """Build and return an OpenAI SDK client bound to *api_key* and *base_url*."""
    sdk_client = OpenAI(api_key=api_key, base_url=base_url)
    return sdk_client


client = create_client(api_key, base_url)


# Non-streaming request: send the whole conversation, receive one full reply.
def ask_gpt(prompt, conversation_history, model="gpt-3.5-turbo"):
    """Append *prompt* to the history, query the model, and return its reply.

    On success the assistant reply is also appended to *conversation_history*
    (the list is mutated in place).  On failure the just-added user message is
    rolled back so the history is not left with an orphaned, unanswered turn,
    and "" is returned.
    """
    conversation_history.append({"role": "user", "content": prompt})
    try:
        response = client.chat.completions.create(
            model=model,
            messages=conversation_history,
        )
        reply = response.choices[0].message.content.strip()
        conversation_history.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        # Bug fix: previously the failed user turn stayed in the history
        # forever, corrupting the context of every later request.
        conversation_history.pop()
        print("非流式传输过程中发生错误：", e)
        if hasattr(e, 'response') and e.response is not None:
            print("Error code:", e.response.status_code)
            print("Error details:", e.response.json())
        return ""


# Streaming request: accumulate the reply as it arrives chunk by chunk.
def gpt_api_stream(messages, model="gpt-3.5-turbo"):
    """Send *messages* with stream=True and return the concatenated reply.

    *messages* is not modified.  Returns "" on any error.
    """
    try:
        stream = client.chat.completions.create(
            model=model,
            messages=messages,
            stream=True,
        )

        parts = []
        for chunk in stream:
            # Some chunks (e.g. a final usage frame) carry no choices;
            # skip them instead of raising IndexError.
            if not chunk.choices:
                continue
            content = getattr(chunk.choices[0].delta, 'content', None)
            if content:
                parts.append(content)
        # join once instead of repeated += string concatenation
        return "".join(parts)
    except Exception as e:
        print("流式传输过程中发生错误：", e)
        if hasattr(e, 'response') and e.response is not None:
            print("Error code:", e.response.status_code)
            print("Error details:", e.response.json())
        return ""


def chat_with_gpt(use_stream, model="gpt-3.5-turbo"):
    """Run an interactive chat loop against the model.

    Parameters
    ----------
    use_stream : bool
        True to use the streaming API helper, False for the plain one.
    model : str
        Model name forwarded to the API.

    The user types 'exit' to leave this session (returns to mode selection)
    or 'kill' to terminate the whole program.
    """
    conversation_history = [{"role": "system", "content": "You are a helpful assistant."}]
    print("开始对话吧！输入 'exit' 退出当前会话，重新选择对话模式，输入 'kill' 结束代码执行。")
    while True:
        user_input = input("你: ")
        if user_input.lower() == "exit":
            print("当前会话已退出。")
            return
        elif user_input.lower() == "kill":
            print("代码执行结束。")
            # raise SystemExit instead of the interactive-only exit() helper
            raise SystemExit(0)
        try:
            if use_stream:
                response = gpt_api_stream(conversation_history + [{"role": "user", "content": user_input}], model)
                # Bug fix: only commit the turn to the history when the call
                # actually produced a reply — previously an error (empty)
                # reply was still recorded, corrupting later requests.
                if response:
                    conversation_history.append({"role": "user", "content": user_input})
                    conversation_history.append({"role": "assistant", "content": response})
            else:
                # ask_gpt maintains conversation_history itself.
                response = ask_gpt(user_input, conversation_history, model)
            print("AI助手: " + response)
        except Exception as e:
            # Defensive boundary: the helpers already catch their own errors,
            # but keep the loop alive if anything unexpected slips through.
            print("API 调用失败，错误信息如下：")
            print(e)
            if hasattr(e, 'response') and e.response is not None:
                print("Error code:", e.response.status_code)
                print("Error details:", e.response.json())


if __name__ == "__main__":
    model = "gpt-4o-mini"
    while True:
        mode = input("选择对话模式：普通模式 (1) 或 流式传输模式 (2): ").strip()
        if mode == "1":
            # Bug fix: pass the selected model in plain mode too — previously
            # mode 1 silently fell back to the gpt-3.5-turbo default while
            # mode 2 used gpt-4o-mini.
            chat_with_gpt(use_stream=False, model=model)
        elif mode == "2":
            chat_with_gpt(use_stream=True, model=model)
        else:
            print("无效的模式选择，请输入 1 或 2。")
