# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

from openai.types.chat import ChatCompletion
from tenacity import retry
from tenacity.stop import stop_after_attempt
from tenacity.wait import wait_exponential

from camel.agents import BaseAgent
from camel.configs import ChatGPTConfig
from camel.messages import ChatMessage, MessageType, SystemMessage
from camel.model_backend import ModelBackend, ModelFactory
from camel.typing import ModelType, RoleType
from camel.utils import (
    get_model_token_limit,
    num_tokens_from_messages,
    openai_api_key_required,
)
from chatad.utils import log_visualize


# try:
#     from openai.types.chat import ChatCompletion
#
#     openai_new_api = True  # new openai api version
# except ImportError:
#     openai_new_api = False  # old openai api version


@dataclass(frozen=True)
class ChatAgentResponse:
    r"""Response of a ChatAgent.

    Attributes:
        msgs (List[ChatMessage]): A list of zero, one or several messages.
            An empty list signals an error during message generation; a
            single message is the normal mode; several messages are the
            critic mode.
        terminated (bool): Whether the agent decided to terminate the chat
            session.
        info (Dict[str, Any]): Extra information about the chat message.
    """
    msgs: List[ChatMessage]
    terminated: bool
    info: Dict[str, Any]

    @property
    def msg(self):
        """Return the single response message.

        Returns ``None`` when ``msgs`` is empty and no error info exists
        (a known benign case); otherwise the sole message in ``msgs``.

        Raises:
            RuntimeError: if the session terminated, if several messages
                are present, or if ``msgs`` is empty while error info exists.
        """
        if self.terminated:
            raise RuntimeError("error in ChatAgentResponse, info:{}".format(str(self.info)))
        count = len(self.msgs)
        if count == 1:
            return self.msgs[0]
        if count > 1:
            raise RuntimeError("Property msg is only available for a single message in msgs")
        # count == 0 from here on: an error only if extra info was recorded.
        if self.info:
            raise RuntimeError("Empty msgs in ChatAgentResponse, info:{}".format(str(self.info)))
        # Known issue: msgs may be empty with no error info; surface None.
        return None


class ChatAgent(BaseAgent):
    r"""Class for managing conversations of CAMEL Chat Agents.

    Args:
        system_message (SystemMessage): The system message for the chat agent.
        model (ModelType, optional): The LLM model to use for generating
            responses. (default :obj:`ModelType.DEFAULT_MODEL`)
        model_config (Any, optional): Configuration options for the LLM model.
            (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
    """

    def __init__(
            self,
            system_message: SystemMessage,
            model: Optional[ModelType] = None,
            model_config: Optional[Any] = None,
            message_window_size: Optional[int] = None,
    ) -> None:
        self.system_message: SystemMessage = system_message
        self.role_name: str = system_message.role_name
        self.role_type: RoleType = system_message.role_type
        self.model: ModelType = (
            model if model is not None else ModelType.DEFAULT_MODEL)
        # Use an explicit `is not None` test (rather than `or`) so a
        # falsy-but-valid config object supplied by the caller is not
        # silently replaced by the default.
        self.model_config: ChatGPTConfig = (
            model_config if model_config is not None else ChatGPTConfig())
        self.model_token_limit: int = get_model_token_limit(self.model)
        self.message_window_size: Optional[int] = message_window_size
        self.model_backend: ModelBackend = ModelFactory.create(
            self.model, self.model_config.__dict__)
        self.terminated: bool = False
        # Flag set when the model signals the conversation is finished
        # (the first line of a reply starts with "<好的>"); see `step`.
        self.info: bool = False
        self.init_messages()

    def reset(self) -> List[MessageType]:
        r"""Resets the :obj:`ChatAgent` to its initial state and returns the
        stored messages.

        Returns:
            List[MessageType]: The stored messages.
        """
        self.terminated = False
        self.init_messages()
        return self.stored_messages

    def get_info(
            self,
            id: Optional[str],
            usage: Optional[Dict[str, int]],
            termination_reasons: List[str],
            num_tokens: int,
    ) -> Dict[str, Any]:
        r"""Packs bookkeeping data about one chat exchange into a dict.

        Args:
            id (str, optional): The id of the chat completion (round).
            usage (Dict[str, int], optional): Token usage reported by the
                backend for this completion.
            termination_reasons (List[str]): Why the generation stopped
                (one finish reason per choice).
            num_tokens (int): Number of prompt tokens consumed.

        Returns:
            Dict[str, Any]: The exchange information keyed by field name.
        """
        # NOTE(review): the `id` parameter shadows the builtin; kept to
        # preserve the public keyword-argument interface.
        return {
            "id": id,
            "usage": usage,
            "termination_reasons": termination_reasons,
            "num_tokens": num_tokens,
        }

    def init_messages(self) -> None:
        r"""Initializes the stored-message list with the system message."""
        self.stored_messages: List[MessageType] = [self.system_message]

    def update_messages(self, message: ChatMessage) -> List[MessageType]:
        r"""Appends a new message to the stored history.

        Args:
            message (ChatMessage): The message to store.

        Returns:
            List[MessageType]: The updated message history.
        """
        self.stored_messages.append(message)
        return self.stored_messages

    @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
    @openai_api_key_required
    def step(
            self,
            input_message: ChatMessage,
    ) -> ChatAgentResponse:
        r"""Performs a single step in the chat session by generating a reply
        to the input message.

        Args:
            input_message (ChatMessage): The input message sent to the agent.

        Returns:
            ChatAgentResponse: A struct containing the output messages, a
                boolean indicating whether the chat session has terminated,
                and information about this exchange.
        """
        # Record the incoming message in this agent's history.
        messages = self.update_messages(input_message)
        # Trim the history to the window size, always keeping the system
        # message at the front so multi-turn phases retain the earlier
        # rounds' instructions.
        if (self.message_window_size is not None
                and len(messages) > self.message_window_size):
            messages = ([self.system_message]
                        + messages[-self.message_window_size:])

        # deepseek-reasoner requires the first non-system message to be a
        # user message, so detect that model up front.
        is_reasoner_model = str(self.model).lower() == "deepseek-reasoner"

        # Convert each ChatMessage into the role/content dict expected by
        # the DeepSeek chat API.
        deepseek_messages = [
            message.to_deepseek_message() for message in messages
        ]

        if is_reasoner_model:
            # Index of the first message that is not a system message.
            first_non_system_msg_idx = next(
                (i for i, m in enumerate(deepseek_messages)
                 if m["role"] != "system"),
                None,
            )
            # If that message exists and is not a user message, force its
            # role to "user" to satisfy the reasoner API contract.
            if (first_non_system_msg_idx is not None
                    and deepseek_messages[first_non_system_msg_idx]["role"]
                    != "user"):
                deepseek_messages[first_non_system_msg_idx]["role"] = "user"

        # Count prompt tokens for the (possibly windowed) context.
        num_tokens = num_tokens_from_messages(deepseek_messages, self.model)

        output_messages: Optional[List[ChatMessage]]
        info: Dict[str, Any]

        if num_tokens < self.model_token_limit:
            # model_backend.run forwards the request to the backend, which
            # also accounts prompt/completion/total tokens and cost.
            response = self.model_backend.run(messages=deepseek_messages)
            if not isinstance(response, ChatCompletion):
                raise RuntimeError("DeepSeek returned unexpected struct")
            # Wrap every returned choice back into a ChatMessage.
            output_messages = [
                ChatMessage(role_name=self.role_name,
                            role_type=self.role_type,
                            meta_dict=dict(), **dict(choice.message))
                for choice in response.choices
            ]
            info = self.get_info(
                response.id,
                response.usage,
                [str(choice.finish_reason) for choice in response.choices],
                num_tokens,
            )

            # The model marks the conversation as finished by starting the
            # first line of its reply with "<好的>". Guard against an empty
            # choices list to avoid an IndexError on degenerate responses.
            if (output_messages
                    and output_messages[0].content.split("\n")[0]
                    .startswith("<好的>")):
                self.info = True
        else:
            # The context would exceed the model's token limit: terminate
            # instead of sending the request.
            self.terminated = True
            output_messages = []

            info = self.get_info(
                None,
                None,
                ["max_tokens_exceeded_by_camel"],
                num_tokens,
            )
        # Output message(s), termination flag, and exchange information.
        return ChatAgentResponse(output_messages, self.terminated, info)

    def __repr__(self) -> str:
        r"""Returns a string representation of the :obj:`ChatAgent`.

        Returns:
            str: The string representation of the :obj:`ChatAgent`.
        """
        return f"ChatAgent({self.role_name}, {self.role_type}, {self.model})"
