import logging
import sys
from typing import Any, Dict, List, Optional

from dbgpt.agent.actions.action import ActionOutput
from dbgpt.agent.agents.agent_new import Agent
from dbgpt.agent.agents.agents_manage import mentioned_agents, participant_roles
from dbgpt.agent.agents.base_agent_new import ConversableAgent
from dbgpt.agent.agents.base_team import ManagerAgent
from dbgpt.agent.common.schema import Status
from dbgpt.agent.memory.base import GptsPlan
from dbgpt.core.interface.message import ModelMessageRoleType

from .planner_agent import PlannerAgent

logger = logging.getLogger(__name__)


class AutoPlanChatManager(ManagerAgent):
    """(In preview) A chat manager agent that can manage a team chat of multiple agents.

    Drives a plan produced by :class:`PlannerAgent`: for every pending plan step
    it selects (or accepts a pre-allocated) agent, feeds it the step goal plus
    the results of any dependent steps, and records success/failure back into
    the plans memory.
    """

    profile: str = "PlanManager"
    goal: str = "Advance the task plan generated by the planning agent. If the plan does not pre-allocate an agent, it needs to be coordinated with the appropriate agent to complete."
    constraints: List[str] = []
    desc: str = "Advance the task plan generated by the planning agent."

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    async def a_process_rely_message(
        self, conv_id: str, now_plan: GptsPlan, speaker: ConversableAgent
    ):
        """Collect the prompt prefix and chat history for a step's dependencies.

        Args:
            conv_id: Conversation id used to look up dependent plan steps.
            now_plan: The plan step about to be executed. Its ``rely`` field is
                a comma-separated list of the sub-task numbers it depends on.
            speaker: The agent that will execute the step (currently unused,
                kept for interface compatibility).

        Returns:
            Tuple ``(rely_prompt, rely_messages)``. ``rely_prompt`` is ``None``
            when the step has no resolvable dependencies; ``rely_messages``
            replays each dependency as a HUMAN question / AI answer pair.
        """
        rely_prompt = None
        rely_messages: List[Dict] = []

        if now_plan.rely and len(now_plan.rely) > 0:
            rely_tasks_list = now_plan.rely.split(",")
            # Bug fix: pass the parsed task numbers instead of an empty list,
            # otherwise dependent steps are never fetched and this method
            # always returned (None, []).
            rely_tasks = self.memory.plans_memory.get_by_conv_id_and_num(
                conv_id, rely_tasks_list
            )
            if rely_tasks:
                rely_prompt = "Read the result data of the dependent steps in the above historical message to complete the current goal:"
                for rely_task in rely_tasks:
                    rely_messages.append(
                        {
                            "content": rely_task.sub_task_content,
                            "role": ModelMessageRoleType.HUMAN,
                        }
                    )
                    rely_messages.append(
                        {
                            "content": rely_task.result,
                            "role": ModelMessageRoleType.AI,
                        }
                    )
        return rely_prompt, rely_messages

    def select_speaker_msg(self, agents: List[Agent]):
        """Return the system prompt used for selecting the next speaker."""
        return f"""You are in a role play game. The following roles are available:
    {participant_roles(agents)}.
    Read the following conversation.
    Then select the next role from {[agent.name for agent in agents]} to play. The role can be selected repeatedly.Only return the role."""

    async def a_select_speaker(
        self,
        last_speaker: Agent,
        selector: ConversableAgent,
        now_goal_context: Optional[str] = None,
        pre_allocated: Optional[str] = None,
    ):
        """Select the next speaker for the current plan step.

        Args:
            last_speaker: The agent that spoke last (unused, kept for
                interface compatibility).
            selector: Agent whose LLM is asked to pick a role when no
                agent is pre-allocated.
            now_goal_context: Content of the current sub-task, shown to the
                selector LLM.
            pre_allocated: Agent name already assigned by the plan; when set,
                the LLM selection step is skipped.

        Returns:
            Tuple of the chosen agent and the model name used for selection
            (``None`` when pre-allocated).

        Raises:
            ValueError: If the LLM produces no answer or the answer cannot be
                resolved to a known agent.
        """
        agents = self.agents

        if pre_allocated:
            # The plan already names the executor; skip LLM selection.
            logger.info(f"Preselect speakers:{pre_allocated}")
            name = pre_allocated
            model = None
        else:
            # auto speaker selection
            # TODO selector a_thinking It has been overwritten and cannot be used.
            final, name, model = await selector.a_thinking(
                messages=[
                    {
                        "role": ModelMessageRoleType.HUMAN,
                        "content": f"""Read and understand the following task content and assign the appropriate role to complete the task.
                                    Task content: {now_goal_context}
                                    select the role from: {[agent.name for agent in agents]},
                                    Please only return the role, such as: {agents[0].name}""",
                    }
                ],
                prompt=self.select_speaker_msg(agents),
            )
            if not final:
                raise ValueError("Unable to select next speaker!")

        # If exactly one agent is mentioned, use it. Otherwise, leave the OAI response unmodified
        mentions = mentioned_agents(name, agents)
        if len(mentions) == 1:
            name = next(iter(mentions))
        else:
            logger.warning(
                f"GroupChat select_speaker failed to resolve the next speaker's name. This is because the speaker selection OAI call returned:\n{name}"
            )

        # Return the result
        try:
            return self.agent_by_name(name), model
        except Exception as e:
            logger.exception(f"auto select speaker failed!{str(e)}")
            raise ValueError("Unable to select next speaker!")

    async def a_act(
        self,
        message: Optional[str],
        sender: Optional[ConversableAgent] = None,
        reviewer: Optional[ConversableAgent] = None,
    ) -> Optional[ActionOutput]:
        """Advance the plan until it completes, fails, or rounds run out.

        Each round either builds a plan (when none exists yet) or executes the
        first TODO/RETRYING step with a selected agent, persisting the step
        outcome to the plans memory.

        Args:
            message: The user goal used to generate the plan.
            sender: Agent that sent the triggering message.
            reviewer: Optional reviewer forwarded to the executing agents.

        Returns:
            An :class:`ActionOutput` describing overall success or failure.
        """
        speaker = sender
        final_message = message
        for i in range(self.max_round):
            plans = self.memory.plans_memory.get_by_conv_id(self.agent_context.conv_id)

            if not plans or len(plans) <= 0:
                # No plan yet: ask the planner to build one. Give up after a
                # few failed attempts.
                if i > 3:
                    return ActionOutput(
                        is_exe_success=False,
                        content="Retrying 3 times based on current application resources still fails to build a valid plan！",
                    )
                planner: ConversableAgent = (
                    await PlannerAgent()
                    .bind(self.memory)
                    .bind(self.agent_context)
                    .bind(self.llm_config)
                    .bind(self.resource_loader)
                    .bind_agents(self.agents)
                    .build()
                )

                # NOTE(review): is_success is not checked here — a failed plan
                # generation simply retries on the next round.
                is_success, plan_message = await planner.a_generate_reply(
                    recive_message={"content": message}, sender=self, reviewer=reviewer
                )
                await planner.a_send(
                    message=plan_message, recipient=self, request_reply=False
                )
            else:
                todo_plans = [
                    plan
                    for plan in plans
                    if plan.state in [Status.TODO.value, Status.RETRYING.value]
                ]
                if not todo_plans or len(todo_plans) <= 0:
                    ### The plan has been fully executed and a success message is sent to the user.
                    # complete
                    return ActionOutput(
                        is_exe_success=True,
                        content=final_message,  # work results message
                    )
                else:
                    try:
                        # Execute the first pending step.
                        now_plan: GptsPlan = todo_plans[0]
                        current_goal_message = {
                            "content": now_plan.sub_task_content,
                            "current_goal": now_plan.sub_task_content,
                            "context": {
                                "plan_task": now_plan.sub_task_content,
                                "plan_task_num": now_plan.sub_task_num,
                            },
                        }
                        # select the next speaker
                        speaker, model = await self.a_select_speaker(
                            speaker,
                            self,
                            now_plan.sub_task_content,
                            now_plan.sub_task_agent,
                        )
                        # Tell the speaker the dependent history information
                        rely_prompt, rely_messages = await self.a_process_rely_message(
                            conv_id=self.agent_context.conv_id,
                            now_plan=now_plan,
                            speaker=speaker,
                        )
                        if rely_prompt:
                            current_goal_message["content"] = (
                                rely_prompt + current_goal_message["content"]
                            )

                        await self.a_send(
                            message=current_goal_message,
                            recipient=speaker,
                            reviewer=reviewer,
                            request_reply=False,
                        )
                        is_success, reply_message = await speaker.a_generate_reply(
                            recive_message=current_goal_message,
                            sender=self,
                            reviewer=reviewer,
                            rely_messages=rely_messages,
                        )
                        await speaker.a_send(
                            reply_message, self, reviewer, request_reply=False
                        )

                        plan_result = ""
                        # Guard before indexing: previously reply_message was
                        # indexed unconditionally and could crash when falsy.
                        if reply_message:
                            final_message = reply_message["content"]
                        if is_success:
                            if reply_message:
                                action_report = reply_message.get("action_report", None)
                                if action_report:
                                    plan_result = action_report.get("content", "")
                                    final_message = action_report["view"]

                            ### The current planned Agent generation verification is successful
                            ##Plan executed successfully
                            self.memory.plans_memory.complete_task(
                                self.agent_context.conv_id,
                                now_plan.sub_task_num,
                                plan_result,
                            )
                        else:
                            # Record the failure (state, retry count, executor,
                            # and the failing output) so the step can be retried
                            # or inspected later.
                            plan_result = reply_message["content"]
                            self.memory.plans_memory.update_task(
                                self.agent_context.conv_id,
                                now_plan.sub_task_num,
                                Status.FAILED.value,
                                now_plan.retry_times + 1,
                                speaker.name,
                                "",
                                plan_result,
                            )
                            return ActionOutput(
                                is_exe_success=False, content=plan_result
                            )

                    except Exception as e:
                        logger.exception(
                            f"An exception was encountered during the execution of the current plan step.{str(e)}"
                        )
                        return ActionOutput(
                            is_exe_success=False,
                            content=f"An exception was encountered during the execution of the current plan step.{str(e)}",
                        )
        return ActionOutput(
            is_exe_success=False,
            content=f"Maximum number of dialogue rounds exceeded.{self.max_round}",
        )
