import openai
import os
import logging

logger = logging.getLogger(__name__)

from aiModels import SYSTEM_PROMPT

def initialize_openai_client(api_key=None, base_url="https://api.deepseek.com"):
    """
    Initialize an OpenAI-compatible client for the DeepSeek API.

    :param api_key: API key to authenticate with. When None, falls back to
                    the ``DEEPSEEK_API_KEY`` environment variable.
    :param base_url: Base URL of the API, defaults to DeepSeek's endpoint.
    :return: An initialized ``openai.OpenAI`` client.
    :raises ValueError: If no API key is supplied and none is found in the
                        environment.
    """
    # SECURITY FIX: the previous default argument embedded a live-looking
    # secret key directly in source control. Credentials must come from the
    # caller or the environment, never from the code.
    if api_key is None:
        api_key = os.getenv("DEEPSEEK_API_KEY")
    if not api_key:
        raise ValueError(
            "No API key provided; pass api_key or set DEEPSEEK_API_KEY"
        )
    return openai.OpenAI(api_key=api_key, base_url=base_url)


def generate_prompt(session, user_prompt, client=None, model="deepseek-reasoner", stream=False):
    """
    Generate an assistant reply for a multi-turn conversation.

    The new user message is appended to ``session`` in place; on success the
    assistant reply is appended as well, so the same list can be passed back
    in on the next turn to continue the dialogue.

    :param session: Mutable list of message dicts (the conversation history).
    :param user_prompt: The user's new input for this turn.
    :param client: Initialized OpenAI client; created on demand when None.
    :param model: Model name, defaults to "deepseek-reasoner".
    :param stream: When True, consume a streamed response chunk by chunk.
    :return: Tuple of (generated content, updated session history).
    :raises ValueError: If the API response is missing or malformed.
    """
    if client is None:
        client = initialize_openai_client()

    # Record the new user turn before calling the API.
    session.append({
        "role": "user",
        "content": user_prompt
    })
    # Lazy %-style args: the history is only formatted when DEBUG is enabled.
    logger.debug("Session history: %s", session)
    try:
        responses = client.chat.completions.create(
            model=model,
            messages=session,
            stream=stream
        )
        if responses is None:
            logger.error("API response is None")
            raise ValueError("API response is None")

        if not stream:
            # BUG FIX: the `choices` inspection below only applies to a
            # non-streaming completion object. A streaming response is an
            # iterator of chunks with no top-level `choices` attribute, so
            # the old unconditional access made every stream=True call fail
            # before the stream branch was ever reached.
            if not hasattr(responses, 'choices') or not responses.choices:
                logger.error("API response does not contain choices")
                raise ValueError("API response does not contain choices")
            logger.debug("Raw API response choices: %s", responses.choices)

            choice = responses.choices[0]
            if not hasattr(choice, 'message') or not hasattr(choice.message, 'content'):
                logger.error("Invalid response format from API")
                raise ValueError("Invalid response format from API")
            generated_content = choice.message.content
        else:
            # Accumulate streamed delta chunks; join once at the end instead
            # of quadratic string concatenation.
            parts = []
            for chunk in responses:
                delta = chunk.choices[0].delta
                if getattr(delta, 'content', None) is not None:
                    parts.append(delta.content)
                    logger.debug("Streamed content: %s", delta.content)
            generated_content = "".join(parts)

        # Record the assistant turn so the caller can continue the dialogue.
        session.append({"role": "assistant", "content": generated_content})
        logger.info("Generated content: %s", generated_content)
        return generated_content, session
    except Exception as e:
        logger.error("Error in generate_prompt: %s", e, exc_info=True)
        raise


def get_deepseek_data(user_prompt, session=None):
    """
    Convenience wrapper around ``generate_prompt``.

    Seeds a brand-new conversation with the system prompt when no history is
    supplied, then forwards the user's input.

    :param user_prompt: The user's input for this turn.
    :param session: Optional existing conversation history.
    :return: Tuple of (generated content, updated session history).
    """
    # First turn: start the conversation with the configured system prompt.
    if session is None:
        session = [{"role": "system", "content": SYSTEM_PROMPT}]

    try:
        # generate_prompt already returns (content, session), so pass the
        # tuple straight through.
        return generate_prompt(session=session, user_prompt=user_prompt)
    except Exception as e:
        logger.error(f"Error in get_deepseek_data: {e}", exc_info=True)
        raise


# Example usage: a simple interactive chat loop.
if __name__ == "__main__":
    # No history yet; get_deepseek_data seeds it on the first turn.
    session = None
    while True:
        user_text = input("\n您：")
        # Exit commands end the conversation.
        if user_text.lower() in ["退出", "exit", "quit"]:
            print("对话结束。")
            break
        logger.info("测试对话发送后结果")
        try:
            reply, session = get_deepseek_data(user_prompt=user_text, session=session)
            print(f"\n助手：{reply}")
        except Exception as e:
            print(f"Error generating prompt: {e}")
            break
