import json
import time
import requests
from typing import Optional
from zhipuai import ZhipuAI

from model_config.config import CONFIG, GLM_4_MODEL_LIST
from utils.log_recording import Logging, log_conversation_to_csv


class LLMClient(Logging):
    """Client wrapper that dispatches chat-completion requests either through
    the official ZhipuAI SDK (for GLM-4 family models) or a generic
    OpenAI-compatible HTTP endpoint, logging every conversation to CSV."""

    def __init__(self, model_name: str, model_token: str, model_api_url: Optional[str] = None):
        """Store the model configuration.

        Args:
            model_name: model identifier placed in request payloads.
            model_token: API key / bearer token for the target service.
            model_api_url: HTTP endpoint for non-ZhipuAI models; unused when
                the model is in GLM_4_MODEL_LIST.
        """
        super().__init__()
        self.model_name = model_name
        self.model_token = model_token
        self.model_api_url = model_api_url

    def _call_llm(self, payload: dict) -> dict:
        """Perform one chat-completion call and log the conversation.

        Args:
            payload: request body; must contain at least a 'model' entry.

        Returns:
            The raw API response (SDK object for ZhipuAI, dict otherwise).

        Raises:
            ValueError: when the payload is missing/invalid or the call fails.
        """
        try:
            # .get() keeps a missing 'model' key on the ValueError path
            # instead of leaking a KeyError out of the subscript.
            if not payload or payload.get('model') is None:
                raise ValueError("传入的payload不正确")

            self.log_info(f"开始调用模型：{payload.get('model')}")

            # GLM-4 family goes through the official SDK; everything else
            # through the generic HTTP endpoint.
            if payload.get('model') in GLM_4_MODEL_LIST:
                response = self._call_zhipuai_api(payload)
            else:
                response = self._call_other_api(payload)
            log_conversation_to_csv(json.dumps(payload.get('messages'), ensure_ascii=False), response)
            self.log_info("模型调用完成")
            return response

        except Exception as e:
            self.log_error(f"调用LLM时发生错误: {str(e)}")
            raise

    def _call_llm_parallel(self, payloads: Optional[list[dict]] = None, max_wait_times: int = 40) -> list[dict]:
        """Submit several payloads concurrently (ZhipuAI async API only).

        Args:
            payloads: one request payload per conversation. Default None
                (avoids the shared mutable-default pitfall) is rejected below.
            max_wait_times: maximum polling attempts (1s apart) per task.

        Returns:
            One response per payload, in payload order.

        Raises:
            ValueError: on empty/invalid payloads or unsupported models.
        """
        try:
            # `not payloads` also rejects the empty list, which previously
            # raised IndexError on payloads[0] instead of ValueError.
            if not payloads or payloads[0].get('model') is None:
                raise ValueError("传入的payload不正确")

            self.log_info(f"开始并行调用模型：{payloads[0].get('model')}")

            # Only the ZhipuAI async API supports parallel submission here.
            if payloads[0].get('model') in GLM_4_MODEL_LIST:
                responses = self._call_zhipuai_api_parallel(payloads=payloads, max_wait_times=max_wait_times)
            else:
                self.log_warning("暂不支持非智谱API的并行调用")
                raise ValueError("暂不支持非智谱API的并行调用")
            self.log_info("并行调用全部完成")

            # Record each question/answer pair to CSV.
            for index, res in enumerate(responses):
                log_conversation_to_csv(json.dumps(payloads[index].get("messages"), ensure_ascii=False), res)

            return responses

        except Exception as e:
            self.log_error(f"调用LLM时发生错误: {str(e)}")
            raise

    def _call_zhipuai_api(self, payload: dict) -> dict:
        """Single synchronous call through the ZhipuAI SDK.

        Raises:
            ValueError: wrapping any SDK error.
        """
        try:
            client = ZhipuAI(api_key=self.model_token)
            call_start_time = time.time()
            response = client.chat.completions.create(**payload)
            call_duration = time.time() - call_start_time
            self.log_info(f"{payload.get('model')}模型回复耗时: {call_duration:.2f}秒")
            return response

        except Exception as e:
            self.log_error(f"调用{payload.get('model')} API失败: {str(e)}")
            raise ValueError(f"调用{payload.get('model')} API出错: {str(e)}")

    def _call_other_api(self, payload: dict) -> dict:
        """Single call to a generic OpenAI-compatible HTTP endpoint.

        Returns:
            The parsed JSON response, guaranteed to contain 'choices'.

        Raises:
            ValueError: on transport errors, bad status, or malformed body.
        """
        headers = {
            "Authorization": f"Bearer {self.model_token}",
            "Content-Type": "application/json"
        }

        try:
            call_start_time = time.time()
            # NOTE(review): no timeout is set, so a hung endpoint blocks
            # forever — consider requests.post(..., timeout=...) once an
            # acceptable upper bound is agreed with callers.
            response = requests.post(self.model_api_url, json=payload, headers=headers)
            call_duration = time.time() - call_start_time
            self.log_info(f"{payload.get('model')}模型回复耗时: {call_duration:.2f}秒")

            response.raise_for_status()
            response_json = response.json()

            # Sanity-check the response shape before callers index into it.
            if not response_json.get('choices'):
                raise ValueError(f"{payload.get('model')} API响应数据格式错误: {response_json}")

            return response_json

        except requests.RequestException as e:
            self.log_error(f"{payload.get('model')} API请求失败: {str(e)}")
            raise ValueError(f"调用{payload.get('model')} API出错: {str(e)}")
        except KeyError as e:
            self.log_error(f"{payload.get('model')} API响应格式错误: {str(e)}")
            raise ValueError(f"响应中缺少必需的字段: {str(e)}")
        except Exception as e:
            self.log_error(f"未知错误: {str(e)}")
            raise ValueError(f"未知错误: {str(e)}")

    def _call_zhipuai_api_parallel(self, payloads: Optional[list[dict]] = None, max_wait_times: int = 100) -> list[dict]:
        """Submit payloads via the ZhipuAI async API and poll until done.

        Each task is polled at most `max_wait_times` times, one second apart.
        A task that never reaches a terminal state yields a placeholder
        response with finish_reason 'TIMEOUT', keeping the result list
        aligned with `payloads`.

        Raises:
            ValueError: wrapping any SDK error.
        """
        payloads = payloads if payloads is not None else []
        try:
            client = ZhipuAI(api_key=self.model_token)

            # Fire off all tasks first, then poll each one.
            task_ids = []
            for payload in payloads:
                submitted = client.chat.asyncCompletions.create(**payload)
                task_ids.append(submitted.id)

            terminal_states = ('SUCCESS', 'FAILED')
            call_start_time = time.time()
            responses = []
            for task_id in task_ids:
                task_status = ''
                attempts = 0
                while task_status not in terminal_states and attempts <= max_wait_times:
                    result_response = client.chat.asyncCompletions.retrieve_completion_result(id=task_id)
                    task_status = result_response.task_status

                    if task_status in terminal_states:
                        responses.append(result_response)
                    else:
                        time.sleep(1)
                    attempts += 1
                # Append the placeholder only when the task truly never
                # finished. The previous `get_cnt > max_wait_times` check
                # could append a duplicate entry when a task completed on
                # its final polling attempt.
                if task_status not in terminal_states:
                    self.log_warning(f"任务{task_id}超时未完成")
                    responses.append({"choices": [{"message": {"content": "", "role":"assistant", "tool_calls":None}}], "finish_reason": "TIMEOUT", "index": 0})
            call_duration = time.time() - call_start_time
            self.log_info(f"全部指令执行完成，{payloads[0].get('model')}模型耗时: {call_duration:.2f}秒")

            return responses

        except Exception as e:
            self.log_error(f"调用{payloads[0].get('model')} API失败: {str(e)}")
            raise ValueError(f"调用{payloads[0].get('model')} API出错: {str(e)}")

def llm_chat(messages: Optional[list] = None, model_name: str = "GLM_4_FLASH", stream: bool = False, temperature: float = 0.5, tools: Optional[list] = None, tool_choice="auto", **args):
    """Convenience entry point: build a payload, call the model, log the result.

    Args:
        messages: chat history as a list of {'role', 'content'} dicts.
            Defaults to None (not a shared mutable []) and is normalized below.
        model_name: key into CONFIG["MODELS"]; change it to switch models.
        stream: whether to request a streaming response.
        temperature: sampling temperature forwarded to the API.
        tools: tool/function definitions forwarded to the API.
        tool_choice: tool-selection strategy forwarded to the API.
        **args: any extra payload fields.

    Returns:
        The raw API response (dict for generic HTTP models, SDK object for
        ZhipuAI models).
    """
    logger = Logging()
    try:
        # Normalize the None defaults — mutable defaults would be shared
        # across calls.
        messages = messages if messages is not None else []
        tools = tools if tools is not None else []

        model_config = CONFIG["MODELS"][model_name]  # switch models by changing model_name (names listed in config.py)

        payload = {
            "model": model_config.get("name", None),
            "messages": messages,
            "stream": stream,
            "temperature": temperature,
            "tools": tools,
            "tool_choice": tool_choice,
            **args
        }

        # Build the client and perform the call.
        client = LLMClient(
            model_name=model_config["name"],
            model_token=model_config["token"],
            model_api_url=model_config["api_url"]
        )

        response = client._call_llm(payload)
        logger.log_info(f"用户输入指令: \n{str(messages)}")

        answer = ""

        # Only the first choice is inspected for logging purposes.
        # Generic HTTP models return a plain dict; the ZhipuAI SDK returns
        # an object — handle both shapes.
        if isinstance(response, dict):
            if response['choices'][0]['message'].get('tool_calls'):
                for tool_call in response['choices'][0]['message']['tool_calls']:
                    answer += f"[{tool_call['function']['name']}：{tool_call['function']['arguments']}]  "
                logger.log_info(f"模型调用工具: \n{answer}")
            else:
                logger.log_info(f"模型返回结果: \n{str(response['choices'][0]['message']['content'])}")
            return response
        else:
            if response.choices[0].message.tool_calls:
                for tool_call in response.choices[0].message.tool_calls:
                    answer += f"[{tool_call.function.name}：{tool_call.function.arguments}]  "
                logger.log_info(f"模型调用工具: \n{answer}")
            else:
                logger.log_info(f"模型返回结果: \n{str(response.choices[0].message.content)}")
            return response
    except Exception as e:
        logger.log_error(f"程序执行出错: {str(e)}")
        raise



def llm_chat_parallel(max_wait_times: int = 40, messages_list: Optional[list] = None, model_name: str = "GLM_4_FLASH", temperature: float = 0.5, tools: Optional[list] = None, tool_choice="auto", **args):
    """Answer multiple conversations in parallel through the async API.

    Args:
        max_wait_times: maximum polling attempts per task (forwarded).
        messages_list: list of conversations, each a list of message dicts.
            Defaults to None (not a shared mutable []) and is normalized below.
        model_name: key into CONFIG["MODELS"].
        temperature: sampling temperature forwarded to the API.
        tools: tool/function definitions forwarded to the API.
        tool_choice: tool-selection strategy forwarded to the API.
        **args: any extra payload fields, applied to every payload.

    Returns:
        One response per conversation, in input order.
    """
    logger = Logging()
    try:
        # Normalize the None defaults — mutable defaults would be shared
        # across calls.
        messages_list = messages_list if messages_list is not None else []
        tools = tools if tools is not None else []

        model_config = CONFIG["MODELS"][model_name]
        payloads = []
        for message in messages_list:
            payload = {
                "model": model_config.get("name", None),
                "messages": message,
                "temperature": temperature,
                "tools": tools,
                "tool_choice": tool_choice,
                **args
            }
            payloads.append(payload)

        # Build the client and submit all payloads.
        client = LLMClient(
            model_name=model_config["name"],
            model_token=model_config["token"],
            model_api_url=model_config["api_url"]
        )

        responses = client._call_llm_parallel(payloads=payloads, max_wait_times=max_wait_times)

        # Log either the tool calls or the text answer of the first choice.
        for index, res in enumerate(responses):
            answer = ""
            if res.choices[0].message.tool_calls:
                logger.log_info(f"输入指令为: \n{str(messages_list[index])}")
                for tool_call in res.choices[0].message.tool_calls:
                    answer += f"[{tool_call.function.name}：{tool_call.function.arguments}]  "
                logger.log_info(f"模型调用工具: \n{answer}")
            else:
                logger.log_info(f"输入指令为: \n{str(messages_list[index])}")
                logger.log_info(f"模型返回结果: \n{str(res.choices[0].message.content)}")
        return responses

    except Exception as e:
        logger.log_error(f"并行调用api时执行出错: {str(e)}")
        raise

# Example usage (run this module directly).
if __name__ == '__main__':
    demo_messages = [{"role": "user", "content": "你好啊"}]
    reply = llm_chat(messages=demo_messages, model_name="O3_MINI_HIGH", temperature=0.)
    print(reply['choices'][0]['message']['content'])
    print("done")


    # # Parallel API-call example:
    # commands = [
    #     "作为童话之王，请以始终保持一颗善良的心为主题，写一篇简短的童话故事。故事应能激发孩子们的学习兴趣和想象力，同时帮助他们更好地理解和接受故事中蕴含的道德和价值观。",
    #     "解释如何使用Python进行数据分析。",
    #     "撰写一份关于人工智能未来发展的报告摘要。",
    # ]
    # messages_list = [
    #     [{"role": "user", "content": command}] for command in commands
    # ]
    # responses = llm_chat_parallel(messages_list=messages_list, model_name="GLM_4_FLASH", max_wait_times=40)
    # print("done")