# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import copy
from typing import Dict, List, Optional, Sequence, Tuple

from camel.agents import (
    ChatAgent,
    TaskPlannerAgent,
    TaskSpecifyAgent,
)
from camel.agents.chat_agent import ChatAgentResponse
from camel.messages import ChatMessage, UserChatMessage
from camel.messages import SystemMessage
from camel.typing import ModelType, RoleType, TaskType, PhaseType
from chatad.utils import log_arguments, log_visualize


@log_arguments
class RolePlaying:
    r"""Role-playing session between two chat agents (assistant and user).

    Args:
        assistant_role_name (str): Role name of the assistant agent.
        user_role_name (str): Role name of the user agent.
        task_prompt (str, optional): The task to be performed.
        assistant_role_prompt (str, optional): Format template for the
            assistant's system message; placeholders (e.g. ``{chatad_prompt}``,
            ``{task}``) are filled from the system-message meta dict.
        user_role_prompt (str, optional): Format template for the user's
            system message; same placeholders as above.
        model_type (ModelType, optional): Backend model used by both agents.
        task_type (TaskType, optional): Defaults to ``TaskType.AI_SOCIETY``.
        assistant_agent_kwargs (Dict, optional): Extra keyword arguments
            forwarded to the assistant ``ChatAgent``.
        user_agent_kwargs (Dict, optional): Extra keyword arguments
            forwarded to the user ``ChatAgent``.
        extend_sys_msg_meta_dicts (List[Dict], optional): Extra meta dicts
            for extending the system-message meta dicts. NOTE(review):
            currently accepted but unused (the merging logic was removed).
        background_prompt (str, optional): Background prompt injected into
            both system messages as ``chatad_prompt``.
    """

    def __init__(
            self,
            assistant_role_name: str,
            user_role_name: str,
            task_prompt: str = "",
            assistant_role_prompt: str = "",
            user_role_prompt: str = "",
            model_type: ModelType = ModelType.DEFAULT_MODEL,
            task_type: TaskType = TaskType.AI_SOCIETY,
            assistant_agent_kwargs: Optional[Dict] = None,
            user_agent_kwargs: Optional[Dict] = None,
            extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
            background_prompt: Optional[str] = "",
    ) -> None:
        self.model_type = model_type
        self.task_type = task_type
        self.task_prompt = task_prompt

        # One meta dict per agent (assistant, user): background prompt plus
        # the user task. The dicts are only read (via str.format), never
        # mutated, so sharing one object twice is safe here.
        sys_msg_meta_dicts = [
            dict(chatad_prompt=background_prompt, task=task_prompt)
        ] * 2

        # Build both system messages: the role prompts are format templates
        # whose placeholders are filled from the meta dicts above.
        self.assistant_sys_msg = SystemMessage(
            role_name=assistant_role_name,
            role_type=RoleType.DEFAULT,
            meta_dict=sys_msg_meta_dicts[0],
            content=assistant_role_prompt.format(**sys_msg_meta_dicts[0]),
        )
        self.user_sys_msg = SystemMessage(
            role_name=user_role_name,
            role_type=RoleType.DEFAULT,
            meta_dict=sys_msg_meta_dicts[1],
            content=user_role_prompt.format(**sys_msg_meta_dicts[1]),
        )

        # ``or {}`` keeps the default immutable while still allowing callers
        # to pass extra ChatAgent options.
        self.assistant_agent: ChatAgent = ChatAgent(
            self.assistant_sys_msg, model_type,
            **(assistant_agent_kwargs or {}),
        )
        self.user_agent: ChatAgent = ChatAgent(
            self.user_sys_msg, model_type,
            **(user_agent_kwargs or {}),
        )

    def init_chat(self, phase_type: Optional[PhaseType] = None,
                  placeholders: Optional[Dict] = None,
                  phase_prompt: Optional[str] = None):
        r"""Reset both agents and seed the conversation for a new phase.

        The phase prompt is formatted with the assistant role name plus the
        given placeholders (chained hand-off of context between phases) and
        wrapped into the opening user message.

        Args:
            phase_type (PhaseType, optional): Current phase (unused here;
                kept for interface compatibility with callers).
            placeholders (Dict, optional): Values substituted into
                ``phase_prompt``. Defaults to an empty dict.
            phase_prompt (str, optional): Format template for this phase.

        Returns:
            Tuple[None, UserChatMessage]: ``None`` (no assistant intro
            message is produced) and the initial user message carrying the
            formatted phase prompt.
        """
        if placeholders is None:
            placeholders = {}
        self.assistant_agent.reset()
        self.user_agent.reset()

        # deepseek-reasoner rejects injected pseudo history, so the
        # user-agent priming below is skipped for that model.
        is_reasoner_model = (
            str(self.model_type.value).lower() == "deepseek-reasoner")

        # Fill the phase prompt with the assistant role name and the
        # placeholder values handed over from previous phases.
        content = phase_prompt.format(
            **({"assistant_role": self.assistant_agent.role_name}
               | placeholders)
        )
        user_msg = UserChatMessage(
            role_name=self.user_sys_msg.role_name,
            role="user",
            content=content,
        )

        if not is_reasoner_model:
            # Prime the user agent's history with its own opening line,
            # stored under the "assistant" role from its point of view.
            pseudo_msg = copy.deepcopy(user_msg)
            pseudo_msg.role = "assistant"
            self.user_agent.update_messages(pseudo_msg)

        # Log the assistant's system prompt plus the phase prompt.
        log_visualize(self.user_agent.role_name,
                      "**[Start Chat]**\n\n[" + self.assistant_agent.system_message.content + "]\n\n" + content)
        return None, user_msg

    def process_messages(
            self,
            messages: Sequence[ChatMessage],
    ) -> ChatMessage:
        r"""Return the message to keep from an agent response.

        The first entry of ``msgs`` is the model's output for the current
        phase, so only that one is kept.

        Raises:
            ValueError: If ``messages`` is empty.
        """
        if not messages:
            raise ValueError("No messages to process.")
        return messages[0]

    def step(
            self,
            user_msg: ChatMessage,
            assistant_only: bool,
    ) -> Tuple[ChatAgentResponse, ChatAgentResponse]:
        r"""Advance the dialogue by one round.

        Sends ``user_msg`` to the assistant agent and, unless
        ``assistant_only`` is set, feeds the assistant's reply to the user
        agent.

        Returns:
            Tuple[ChatAgentResponse, ChatAgentResponse]: The assistant's
            response and the user's response. Either may carry an empty
            message list when the corresponding agent terminated or already
            produced a conclusion (``agent.info``).

        Raises:
            AssertionError: If ``user_msg`` is not a ``ChatMessage``.
        """
        # BUGFIX: the original used ``assert cond, print(...)`` — the
        # message became ``None`` (print's return value). Raise explicitly
        # with the real text, keeping the same exception type.
        if not isinstance(user_msg, ChatMessage):
            raise AssertionError("broken user_msg: " + str(user_msg))

        # Make sure the outgoing message is sent under the "user" role.
        user_msg_rst = user_msg.set_user_role_at_backend()

        assistant_response = self.assistant_agent.step(user_msg_rst)
        if assistant_response.terminated or assistant_response.msgs is None:
            # e.g. token limit exceeded: return empty responses for both.
            return (ChatAgentResponse([], False, {}),
                    ChatAgentResponse([], False, {}))

        # msgs is a list; the first element is this phase's model output.
        assistant_msg = self.process_messages(assistant_response.msgs)
        if self.assistant_agent.info:
            # A conclusion was already reached; stop without recording.
            return (ChatAgentResponse([assistant_msg],
                                      assistant_response.terminated,
                                      assistant_response.info),
                    ChatAgentResponse([], False, {}))

        # No conclusion yet: record the assistant's output in its history.
        self.assistant_agent.update_messages(assistant_msg)

        if assistant_only:  # used for self-task-improve
            return (ChatAgentResponse([assistant_msg],
                                      assistant_response.terminated,
                                      assistant_response.info),
                    ChatAgentResponse([], False, {}))

        # Feed the assistant's output to the user agent as a user message.
        assistant_msg_rst = assistant_msg.set_user_role_at_backend()
        user_response = self.user_agent.step(assistant_msg_rst)
        if user_response.terminated or user_response.msgs is None:
            # BUGFIX: was ``[user_response]`` — the whole response object
            # was wrapped as a message, unlike every other early return.
            return (ChatAgentResponse([assistant_msg],
                                      assistant_response.terminated,
                                      assistant_response.info),
                    ChatAgentResponse([], user_response.terminated,
                                      user_response.info))
        user_msg = self.process_messages(user_response.msgs)
        if self.user_agent.info:
            return (ChatAgentResponse([assistant_msg],
                                      assistant_response.terminated,
                                      assistant_response.info),
                    ChatAgentResponse([user_msg], user_response.terminated,
                                      user_response.info))
        self.user_agent.update_messages(user_msg)

        return (
            ChatAgentResponse([assistant_msg], assistant_response.terminated,
                              assistant_response.info),
            ChatAgentResponse([user_msg], user_response.terminated,
                              user_response.info),
        )
