import asyncio
import functools
import json
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, Optional

import httpx
import requests
from exceptiongroup import catch
from openai import OpenAI
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
from zhipuai import ZhipuAI

from model_config.config import CONFIG, GLM_4_MODEL_LIST
from my_utils.log_recording import Logging, log_conversation_to_csv


def retry_on_error(max_retries=3, initial_wait=1, wait_increment=1):
    """Decorator factory: retry the wrapped callable on any exception.

    Sleeps ``initial_wait`` seconds before the first retry and increases the
    wait by ``wait_increment`` after each failed attempt. After ``max_retries``
    failed attempts it prints a message and returns None instead of raising
    (the original contract, which callers such as the ZhipuAI submission path
    rely on: a None result marks a failed submission).

    Fix over the original: it now works for ``async def`` functions too. The
    previous sync-only wrapper returned the coroutine un-awaited, so exceptions
    raised inside an async callee were never observed and never retried.
    ``functools.wraps`` is also applied so wrapped metadata survives.
    """

    def decorator(func):
        if asyncio.iscoroutinefunction(func):
            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                wait_time = initial_wait
                for attempt in range(1, max_retries + 1):
                    try:
                        return await func(*args, **kwargs)
                    except Exception as e:
                        print(f"发生错误: {e}，正在重试 ({attempt}/{max_retries})，等待 {wait_time} 秒")
                        # Non-blocking sleep so the event loop keeps running.
                        await asyncio.sleep(wait_time)
                        wait_time += wait_increment
                print(f"达到最大重试次数 ({max_retries})，放弃")
                return None

            return async_wrapper

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            wait_time = initial_wait
            for attempt in range(1, max_retries + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    print(f"发生错误: {e}，正在重试 ({attempt}/{max_retries})，等待 {wait_time} 秒")
                    time.sleep(wait_time)
                    wait_time += wait_increment  # back off a little more each round
            print(f"达到最大重试次数 ({max_retries})，放弃")
            return None

        return wrapper

    return decorator


class OpenAIObject:
    """Lightweight attribute-access wrapper mimicking OpenAI SDK response objects.

    Nested dicts become nested OpenAIObject instances, and dicts found inside
    lists are wrapped element-wise, so `obj.choices[0].message.content`-style
    access works on plain JSON payloads.
    """

    def __init__(self, data: Dict):
        # Promote each value to attribute form, recursing into dicts and lists.
        for name, raw in data.items():
            setattr(self, name, self._wrap(raw))

    @staticmethod
    def _wrap(raw):
        # dicts -> OpenAIObject; lists -> wrap only their dict elements;
        # everything else passes through untouched.
        if isinstance(raw, dict):
            return OpenAIObject(raw)
        if isinstance(raw, list):
            return [OpenAIObject(item) if isinstance(item, dict) else item for item in raw]
        return raw

    def model_dump(self) -> Dict:
        """Recursively convert this object (and nested ones) back into a plain dict."""

        def unwrap(value):
            if isinstance(value, OpenAIObject):
                return value.model_dump()
            if isinstance(value, list):
                return [item.model_dump() if isinstance(item, OpenAIObject) else item for item in value]
            return value

        return {name: unwrap(value) for name, value in self.__dict__.items()}

    def __repr__(self):
        return f"<OpenAIObject {self.__dict__}>"


class LLMClient(Logging):
    """Chat-completion client that routes requests to the ZhipuAI SDK for
    GLM-4 family models, or to an OpenAI-compatible / raw-HTTP backend for
    everything else. Supports single calls and async batch fan-out.

    Inherits log_info / log_error / log_warning from `Logging`
    (my_utils.log_recording; definition not visible in this file).
    """

    def __init__(self, model_name: str, model_token: str, model_api_url: Optional[str] = None):
        # Store the model configuration used by subsequent calls.
        super().__init__()
        self.model_name = model_name  # model identifier (also present in each payload)
        self.model_token = model_token  # API key / bearer token for the backend
        self.model_api_url = model_api_url  # base URL for non-ZhipuAI backends

    def _call_llm(self, payload: dict) -> dict:
        """Dispatch one chat request to the appropriate backend and log the exchange.

        Args:
            payload: Request body; expected to contain 'model' and 'messages'.
                NOTE(review): a payload missing the 'model' key raises KeyError
                on the check below rather than the intended ValueError — confirm
                callers always set it.

        Returns:
            The backend response (ZhipuAI SDK object, OpenAI SDK object, or a
            plain dict from the raw-HTTP path).

        Raises:
            Re-raises any backend/logging error after logging it.
        """
        try:
            if payload is None or payload['model'] is None:
                raise ValueError("传入的payload不正确")

            self.log_info(f"开始调用模型：{payload.get('model')}")

            # Choose the backend by model family.
            response = None
            if payload.get('model') in GLM_4_MODEL_LIST:
                response = self._call_zhipuai_api(payload)
            else:
                response = self._call_other_api(payload)
            # Persist the prompt/response pair for auditing.
            log_conversation_to_csv(json.dumps(payload.get('messages'), ensure_ascii=False), response)
            self.log_info("模型调用完成")
            return response

        except Exception as e:
            self.log_error(f"调用LLM时发生错误: {str(e)}")
            raise

    async def _call_llm_parallel(self, payloads: list[dict] = [], max_wait_times: int = 40) -> list[dict]:
        """Dispatch a batch of chat requests concurrently.

        Only GLM-4 models are supported here; other backends raise
        NotImplementedError (see the branch below).

        Args:
            payloads: One request body per conversation. NOTE(review): mutable
                default argument — harmless here because it is never mutated,
                but a None sentinel would be safer.
            max_wait_times: Per-task polling timeout in seconds.

        Returns:
            A list aligned with `payloads`; each element is either a ZhipuAI
            result object (success), a dict with finish_reason == "TIMEOUT",
            or an {"error": ..., "finish_reason": "ERROR"} dict.
        """
        try:
            if not payloads or not isinstance(payloads[0].get('model'), str):  # slightly improved validation
                raise ValueError("传入的payload不正确或为空")

            self.log_info(f"开始并行调用模型：{payloads[0].get('model')}")

            # Choose the backend by model family; only the first payload's model is inspected.
            if payloads[0].get('model') in GLM_4_MODEL_LIST:
                # Await the async ZhipuAI batch path.
                responses = await self._call_zhipuai_api_parallel(payloads=payloads, max_wait_times=max_wait_times)
            else:
                # _call_other_api_parallel would need to be async (or wrapped
                # with asyncio.to_thread) before it could be awaited here.
                self.log_warning("_call_other_api_parallel 未实现或未改为异步，暂不支持")
                raise NotImplementedError("_call_other_api_parallel 需要实现为异步")

            self.log_info("并行调用全部完成")
            print("并行调用全部完成")  # kept from the original implementation

            # Log every conversation (success, timeout, or error) to CSV, in order.
            successful_responses = []
            for index, res in enumerate(responses):
                # Distinguish successful SDK result objects from timeout/error dicts.
                if hasattr(res, 'task_status') and res.task_status == 'SUCCESS':
                    log_conversation_to_csv(json.dumps(payloads[index].get("messages"), ensure_ascii=False), res)
                    successful_responses.append(res)
                elif isinstance(res, dict) and res.get("finish_reason") == "TIMEOUT":
                    # Record the timeout so the CSV log reflects it.
                    log_conversation_to_csv(json.dumps(payloads[index].get("messages"), ensure_ascii=False),
                                            {"error": "TIMEOUT"})
                    successful_responses.append(res)  # keep it so callers can see this slot timed out
                else:
                    # Any other failure: log it and keep an error marker in place.
                    log_conversation_to_csv(json.dumps(payloads[index].get("messages"), ensure_ascii=False),
                                            {"error": str(res)})
                    successful_responses.append({"error": str(res), "finish_reason": "ERROR"})  # error marker

            return successful_responses  # order matches the input payloads

        except Exception as e:
            self.log_error(f"调用LLM时发生错误: {str(e)}")
            raise

    def _call_zhipuai_api(self, payload: dict) -> dict:
        """Synchronous single call through the ZhipuAI SDK; logs elapsed time."""
        try:
            client = ZhipuAI(api_key=self.model_token)
            call_start_time = time.time()
            response = client.chat.completions.create(**payload)
            call_duration = time.time() - call_start_time
            self.log_info(f"{payload.get('model')}模型回复耗时: {call_duration:.2f}秒")

            # NOTE: token-usage logging (response.usage.prompt_tokens /
            # completion_tokens / total_tokens) was intentionally disabled here;
            # re-enable if accounting is needed.
            return response

        except Exception as e:
            self.log_error(f"调用{payload.get('model')} API失败: {str(e)}")
            raise ValueError(f"调用{payload.get('model')} API出错: {str(e)}")

    def _call_other_api(self, payload: dict) -> dict:
        """Single call to a non-ZhipuAI backend.

        Uses the OpenAI SDK when the target URL only supports it; otherwise
        POSTs the payload directly and returns the decoded JSON dict.

        NOTE(review): `Only_Support_OpenAI_SDK_list` is neither defined nor
        imported in this module — as written, reaching that check raises
        NameError (surfaced as the generic ValueError below). Confirm whether
        it should be imported from model_config.config.
        """

        try:
            call_start_time = time.time()
            if self.model_api_url in Only_Support_OpenAI_SDK_list:
                client = OpenAI(api_key=self.model_token,
                                base_url=self.model_api_url)
                response = client.chat.completions.create(**payload)
            else:
                headers = {
                    "Authorization": f"Bearer {self.model_token}",
                    "Content-Type": "application/json"
                }
                response = requests.post(self.model_api_url, json=payload, headers=headers)
                response.raise_for_status()
                response = response.json()
                # Sanity-check the response shape before returning it.
                if not response.get('choices'):
                    raise ValueError(f"{payload.get('model')} API响应数据格式错误: {response}")

            call_duration = time.time() - call_start_time
            self.log_info(f"{payload.get('model')}模型回复耗时: {call_duration:.2f}秒")
            return response

        except requests.RequestException as e:
            self.log_error(f"{payload.get('model')} API请求失败: {str(e)}")
            raise ValueError(f"调用{payload.get('model')} API出错: {str(e)}")
        except KeyError as e:
            self.log_error(f"{payload.get('model')} API响应格式错误: {str(e)}")
            raise ValueError(f"响应中缺少必需的字段: {str(e)}")
        except Exception as e:
            self.log_error(f"未知错误: {str(e)}")
            raise ValueError(f"未知错误: {str(e)}")

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_fixed(1),
        retry=retry_if_exception_type(httpx.HTTPStatusError)  # retry only on HTTP status errors
    )
    async def call_zhipu_api_async(self, client, payload, semaphore):
        """Submit one async completion, limiting concurrency with `semaphore`.

        Retried up to 3 times (1s apart) by tenacity on httpx.HTTPStatusError.
        NOTE(review): no caller of this method is visible in this file.
        """
        async with semaphore:  # bound the number of in-flight requests
            try:
                res = await client.chat.asyncCompletions.create(**payload)
                return res
            except httpx.HTTPStatusError as e:
                if e.response.status_code == 429:
                    print(f"Caught 429 error: {e}")
                    raise  # re-raise so tenacity retries
                else:
                    raise

    @staticmethod
    @retry_on_error(max_retries=5, initial_wait=1, wait_increment=1)
    async def async_completions_create(client, **kwargs):
        """Submit a ZhipuAI async-completion task without blocking the event loop.

        NOTE(review): retries only take effect if `retry_on_error` is
        async-aware; a plain sync wrapper would hand back the coroutine
        un-awaited and never observe exceptions raised inside it — verify.
        """
        # Run the SDK's synchronous create() in a worker thread.
        res = await asyncio.to_thread(
            client.chat.asyncCompletions.create,
            **kwargs  # forward keyword arguments verbatim
        )
        return res

    async def _poll_single_task(self, client: ZhipuAI, task_id: str, max_wait_seconds: int) -> Any:
        """Poll one ZhipuAI async task until it finishes, fails, or times out.

        Returns the SDK result object on SUCCESS; otherwise a sentinel dict
        whose "finish_reason" is one of TIMEOUT / FAILED / POLL_ERROR.
        """
        start_poll_time = time.time()
        while True:
            current_time = time.time()
            # Give up once the overall time budget is exhausted.
            if current_time - start_poll_time > max_wait_seconds:
                self.log_warning(f"任务 {task_id} 轮询超时 ({max_wait_seconds}s)")
                # Return a sentinel dict shaped like an (empty) chat response.
                return {"task_id": task_id, "finish_reason": "TIMEOUT",
                        "choices": [{"message": {"content": "", "role": "assistant", "tool_calls": None}}]}

            try:
                loop = asyncio.get_running_loop()
                # Run the SDK's synchronous status check in the default thread pool.
                result_response = await loop.run_in_executor(
                    None,  # default executor (ThreadPoolExecutor)
                    client.chat.asyncCompletions.retrieve_completion_result,
                    task_id
                )
                task_status = result_response.task_status

                if task_status == 'SUCCESS':
                    return result_response  # finished: hand back the SDK object
                elif task_status == 'FAILED':
                    self.log_error(f"任务 {task_id} 执行失败。")
                    # Surface the failure as a sentinel dict.
                    return {"task_id": task_id, "finish_reason": "FAILED",
                            "error_details": str(result_response)}
                else:  # still PROCESSING or QUEUING
                    await asyncio.sleep(1)  # yield the event loop before polling again

            except Exception as poll_exc:
                self.log_error(f"轮询任务 {task_id} 时发生错误: {poll_exc}")
                # Surface the polling error as a sentinel dict.
                return {"task_id": task_id, "finish_reason": "POLL_ERROR", "error": str(poll_exc)}

    async def _call_zhipuai_api_parallel(self, payloads: list[dict] = [], max_wait_times: int = 100) -> list[dict]:
        """Submit a batch of ZhipuAI async tasks, then poll them concurrently.

        Results come back in the same order as `payloads`; failed submissions
        and polling errors become sentinel dicts rather than raising, so one
        bad request cannot sink the whole batch.

        NOTE(review): mutable default argument for `payloads` (never mutated).
        """
        if not payloads:
            return []
        try:
            client = ZhipuAI(api_key=self.model_token)
            submission_tasks = []

            # Step 1: submit all task-creation requests concurrently.
            self.log_info(f"并发提交 {len(payloads)} 个异步任务...")
            submission_start_time = time.time()
            for payload in payloads:
                # One async submission coroutine per payload.
                submission_tasks.append(self.async_completions_create(client, **payload))

            # return_exceptions=True keeps one failure from cancelling the rest;
            # failed slots come back as exception objects.
            submission_results = await asyncio.gather(*submission_tasks, return_exceptions=True)
            submission_duration = time.time() - submission_start_time
            print(f"任务提交阶段耗时: {submission_duration:.2f}秒")

            task_ids = []
            # Collect task ids; submission-time failures become None placeholders
            # so polling skips them while ordering is preserved.
            for i, res in enumerate(submission_results):
                if isinstance(res, Exception):
                    # Submission raised: record the error and hold the slot with None.
                    self.log_error(f"提交任务时发生错误 (payload index {i}): {res}")
                    task_ids.append(None)
                elif res and hasattr(res, 'id'):
                    task_ids.append(res.id)
                else:
                    # Submission "succeeded" but did not return an object with an id.
                    self.log_error(f"提交任务成功但返回无效结果 (payload index {i}): {res}")
                    task_ids.append(None)

            # Drop the failed submissions before polling.
            valid_task_ids = [tid for tid in task_ids if tid is not None]
            if not valid_task_ids:
                self.log_warning("所有任务提交均失败或返回无效结果。")
                # Every submission failed: return one error response per payload.
                error_responses = [
                    {"choices": [{"message": {"content": f"Submission failed for payload {i}", "role": "assistant"}}],
                     "finish_reason": "ERROR"} for i in range(len(payloads))]
                return error_responses

            # Step 2: poll all successfully submitted tasks concurrently.
            self.log_info(f"并发轮询 {len(valid_task_ids)} 个任务的结果...")
            polling_start_time = time.time()
            polling_tasks = []
            for task_id in valid_task_ids:
                polling_tasks.append(self._poll_single_task(client, task_id, max_wait_times))

            # Concurrent polling; exceptions are returned in-place, not raised.
            polling_results = await asyncio.gather(*polling_tasks, return_exceptions=True)
            polling_duration = time.time() - polling_start_time
            print(f"任务轮询阶段耗时: {polling_duration:.2f}秒")

            # Step 3: reassemble results in the original payload order.
            final_responses = []
            polling_result_map = {  # task_id -> polling result, for ordered lookup
                # NOTE(review): assumes SUCCESS result objects expose the task id
                # as `.id`; sentinel dicts carry it under 'task_id'. Confirm the
                # ZhipuAI SDK attribute name, otherwise successes map to None.
                (res.id if hasattr(res, 'id') else (res.get('task_id') if isinstance(res, dict) else None)): res
                for res in polling_results if res  # skip falsy entries
            }
            # gather(return_exceptions=True) may also yield raw exceptions; map them too.
            for i, res in enumerate(polling_results):
                if isinstance(res, Exception):
                    original_task_id = valid_task_ids[i]  # position i matches valid_task_ids
                    self.log_error(f"轮询任务 {original_task_id} 时 gather 捕获到异常: {res}")
                    polling_result_map[original_task_id] = {"task_id": original_task_id,
                                                            "finish_reason": "GATHER_ERROR", "error": str(res)}

            original_index = 0
            for task_id in task_ids:  # iterate in original submission order (Nones included)
                if task_id is None:
                    # Submission failed: keep an error placeholder in this slot.
                    final_responses.append({"choices": [{"message": {
                        "content": f"Submission failed for original payload index {original_index}",
                        "role": "assistant"}}], "finish_reason": "SUBMISSION_ERROR"})
                else:
                    # Look up the polling outcome for this task.
                    result = polling_result_map.get(task_id)
                    if result:
                        final_responses.append(result)
                    else:
                        # Should be unreachable unless the mapping above missed a case.
                        self.log_error(f"未能找到任务 {task_id} 的轮询结果！")
                        final_responses.append({"choices": [{"message": {
                            "content": f"Polling result missing for task {task_id}", "role": "assistant"}}],
                                                "finish_reason": "MISSING_RESULT"})
                original_index += 1

            call_duration = time.time()  # NOTE(review): despite the name, this is an end timestamp, not a duration
            self.log_info(
                f"全部异步指令处理完成，{payloads[0].get('model')}模型总耗时: {call_duration - submission_start_time:.2f}秒")
            print(
                f"全部指令执行完成，{payloads[0].get('model')}模型耗时: {call_duration - submission_start_time:.2f}秒")  # kept from original

            return final_responses

        except Exception as e:
            # Catches setup/framework errors (client construction, gather plumbing).
            self.log_error(f"调用 {payloads[0].get('model')} API 并行处理框架时发生意外错误: {str(e)}")
            # Deliberately swallow and return per-payload error responses
            # instead of re-raising, so callers always get an aligned list.
            error_msg = f"Error in parallel processing: {str(e)}"
            return [{"choices": [{"message": {"content": error_msg, "role": "assistant"}}],
                     "finish_reason": "FRAMEWORK_ERROR"} for _ in payloads]

    def _call_other_api_parallel(self, payloads: list[dict] = [], max_wait_times: int = 100) -> list[dict]:
        """Call a non-ZhipuAI backend once per payload using a thread pool.

        NOTE(review): `max_wait_times` is accepted but unused here, and this
        method is synchronous while _call_llm_parallel requires an async
        counterpart (see the NotImplementedError there) — so it is currently
        unreachable from the parallel entry point.
        """
        try:
            with ThreadPoolExecutor() as executor:
                futures = [executor.submit(self._call_other_api, payload) for payload in
                           payloads]
                responses = [future.result() for future in futures]
            return responses

        except Exception as e:
            self.log_error(f"并行调用{payloads[0].get('model')}出错：{str(e)}")
            raise ValueError(f"并行调用{payloads[0].get('model')}出错：{str(e)}")


def llm_chat(messages: list = None, model_name: str = "GLM_4_FLASH", stream: bool = False, temperature: float = 0.5,
             tools: list = None, tool_choice="auto", **args):
    """Send one chat-completion request to the configured model.

    Args:
        messages: OpenAI-style message dicts. Defaults to an empty list
            (a None sentinel replaces the original mutable default argument).
        model_name: Key into CONFIG["MODELS"]; change it to switch models
            (available names are listed in config.py).
        stream: Forwarded to the backend unchanged.
        temperature: Sampling temperature.
        tools: Optional tool definitions; 'tools'/'tool_choice' are only added
            to the payload when tools is non-empty.
        tool_choice: Tool-selection strategy, included only alongside tools.
        **args: Extra payload fields forwarded verbatim.

    Returns:
        The model response. Plain-dict responses are wrapped in OpenAIObject,
        so callers can always use attribute access (response.choices[0]...).

    Raises:
        Re-raises any error after logging it (e.g. KeyError for an unknown
        model_name, ValueError from the backend call).
    """
    logger = Logging()
    # Normalize the None sentinels; avoids the shared-mutable-default pitfall.
    messages = [] if messages is None else messages
    tools = [] if tools is None else tools
    try:
        model_config = CONFIG["MODELS"][model_name]

        payload = {
            "model": model_config.get("name", None),
            "messages": messages,
            "stream": stream,
            "temperature": temperature,
            **({"tools": tools} if tools else {}),
            **({"tool_choice": tool_choice} if tools else {}),
            **args
        }

        # Build the client and dispatch the single request.
        client = LLMClient(
            model_name=model_config["name"],
            model_token=model_config["token"],
            model_api_url=model_config["api_url"]
        )

        response = client._call_llm(payload)
        logger.log_info(f"用户输入指令: \n{str(messages)}")

        answer = ""

        # Normalize raw-HTTP dict responses to attribute-style access.
        if isinstance(response, dict):
            response = OpenAIObject(response)

        if hasattr(response.choices[0].message, 'tool_calls') and response.choices[0].message.tool_calls:
            for tool_call in response.choices[0].message.tool_calls:
                answer += f"[{tool_call.function.name}：{tool_call.function.arguments}]  "
            logger.log_info(f"模型调用工具: \n{answer}")
        else:
            logger.log_info(f"模型返回结果: \n{str(response.choices[0].message.content)}")
        return response
    except Exception as e:
        logger.log_error(f"程序执行出错: {str(e)}")
        raise


async def llm_chat_parallel(max_wait_times: int = 40, messages_list: list = None, model_name: str = "GLM_4_FLASH",
                            temperature: float = 0.5, tools: list = None, tool_choice="auto", **args):
    """Answer several conversations in parallel with one model.

    Fixes over the original: (1) per-element normalization — the batch may mix
    SDK result objects with error/timeout dicts, and the old blanket
    `isinstance(responses[0], dict)` conversion crashed on mixed batches and
    raised IndexError on an empty one; (2) `finish_reason` is read with
    getattr, since error placeholders may omit it on the choice; (3) the
    mutable default arguments are replaced with None sentinels.

    Args:
        max_wait_times: Per-task polling timeout in seconds.
        messages_list: One list of OpenAI-style message dicts per conversation.
        model_name: Key into CONFIG["MODELS"] selecting the model.
        temperature: Sampling temperature.
        tools: Optional tool definitions, applied to every conversation.
        tool_choice: Tool-selection strategy, included only alongside tools.
        **args: Extra payload fields forwarded verbatim to every payload.

    Returns:
        A list aligned with `messages_list`; dict results are wrapped in
        OpenAIObject so callers can use attribute access uniformly. On a
        framework-level error, a list of FRAMEWORK_ERROR placeholder objects
        is returned instead of raising.
    """

    logger = Logging()
    # Normalize the None sentinels; avoids the shared-mutable-default pitfall.
    messages_list = [] if messages_list is None else messages_list
    tools = [] if tools is None else tools
    try:
        model_config = CONFIG["MODELS"][model_name]
        payloads = []
        for message in messages_list:
            payload = {
                "model": model_config.get("name", None),
                "messages": message,
                "temperature": temperature,
                **({"tools": tools} if tools else {}),
                **({"tool_choice": tool_choice} if tools else {}),
                **args
            }
            payloads.append(payload)

        # Build the client for the batch.
        client = LLMClient(
            model_name=model_config["name"],
            model_token=model_config["token"],
            model_api_url=model_config["api_url"]
        )

        # Await the async batch dispatch.
        responses = await client._call_llm_parallel(payloads=payloads, max_wait_times=max_wait_times)

        # Normalize per element: successes are SDK objects, failures/timeouts
        # are plain dicts. Non-dicts pass through untouched.
        processed_responses = [OpenAIObject(res) if isinstance(res, dict) else res for res in responses]

        for index, res in enumerate(processed_responses):
            # Skip anything without the expected choices structure.
            if not res or not hasattr(res, 'choices') or not res.choices:
                logger.log_warning(f"处理响应索引 {index} 时发现无效结构: {res}")
                continue

            choice = res.choices[0]
            # Skip choices without a message.
            if not choice or not hasattr(choice, 'message'):
                logger.log_warning(f"处理响应索引 {index} 时发现无效 choice 结构: {choice}")
                continue

            message = choice.message
            # Error/timeout placeholders may omit finish_reason on the choice;
            # getattr avoids the AttributeError the direct access caused.
            finish_reason = getattr(choice, 'finish_reason', None)

            answer = ""
            # Successful tool-call response.
            if finish_reason != 'stop' and hasattr(message, 'tool_calls') and message.tool_calls:
                logger.log_info(f"输入指令为: \n{str(messages_list[index])}")
                for tool_call in message.tool_calls:
                    # Guard against partially-formed tool_call structures.
                    if hasattr(tool_call, 'function') and hasattr(tool_call.function, 'name') and hasattr(
                            tool_call.function, 'arguments'):
                        answer += f"[{tool_call.function.name}：{tool_call.function.arguments}]  "
                    else:
                        logger.log_warning(f"响应索引 {index} 的 tool_call 结构不完整: {tool_call}")
                logger.log_info(f"模型调用工具: \n{answer}")
            # Successful plain-text response.
            elif finish_reason == 'stop' and hasattr(message, 'content'):
                logger.log_info(f"输入指令为: \n{str(messages_list[index])}")
                logger.log_info(f"模型返回结果: \n{str(message.content)}")
            # Everything else: timeout, error, unexpected state.
            else:
                logger.log_info(f"输入指令为: \n{str(messages_list[index])}")
                logger.log_warning(f"模型返回非预期状态: finish_reason='{finish_reason}', message='{message}'")

        return processed_responses

    except Exception as e:
        logger.log_error(f"并行调用api时执行出错: {str(e)}")
        # Return per-conversation error objects instead of raising, so the
        # caller always gets a list aligned with messages_list.
        error_msg = f"Error in llm_chat_parallel: {str(e)}"
        return [OpenAIObject({"choices": [
            {"message": {"content": error_msg, "role": "assistant"}, "finish_reason": "FRAMEWORK_ERROR"}]})] * len(
            messages_list)


# Example usage
if __name__ == '__main__':
    messages = [{"role": "user", "content": "你好啊"}]
    response = llm_chat(messages=messages, model_name="O3_MINI_HIGH", temperature=0.)
    # llm_chat always returns an attribute-style object (plain dicts are
    # wrapped in OpenAIObject, which is not subscriptable), so use attribute
    # access instead of the original response['choices'][...] indexing.
    print(response.choices[0].message.content)
    print("done")

    # # Parallel LLM API call example (llm_chat_parallel is a coroutine, so it
    # # must be driven with asyncio.run):
    # commands = [
    #     "作为童话之王，请以始终保持一颗善良的心为主题，写一篇简短的童话故事。故事应能激发孩子们的学习兴趣和想象力，同时帮助他们更好地理解和接受故事中蕴含的道德和价值观。",
    #     "解释如何使用Python进行数据分析。",
    #     "撰写一份关于人工智能未来发展的报告摘要。",
    # ]
    # messages_list = [
    #     [{"role": "user", "content": command}] for command in commands
    # ]
    # responses = asyncio.run(llm_chat_parallel(messages_list=messages_list, model_name="GLM_4_FLASH", max_wait_times=40))
    # print("done")
