# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT

import json
import logging
from typing import Annotated, Literal

from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langgraph.types import Command, interrupt
from langchain_mcp_adapters.client import MultiServerMCPClient

from src.agents.agents import coder_agent, research_agent, create_agent

from src.tools.search import LoggedTavilySearch
from src.tools import (
    crawl_tool,
    web_search_tool,
    python_repl_tool,
)

from src.config.agents import AGENT_LLM_MAP
from src.config.configuration import Configuration
from src.llms.llm import get_llm_by_type
from src.prompts.planner_model import Plan, StepType
from src.prompts.template import apply_prompt_template
from src.utils.json_utils import repair_json_output

from .types import State
from ..config import SEARCH_MAX_RESULTS, SELECTED_SEARCH_ENGINE, SearchEngine

logger = logging.getLogger(__name__)


# Tool definitions
@tool
def handoff_to_planner(
    task_title: Annotated[str, "The title of the task to be handed off."],
    locale: Annotated[str, "The user's detected language locale (e.g., en-US, zh-CN)."],
):
    """Handoff to planner agent to do plan."""
    # Intentionally a no-op: this tool is never executed for its return value.
    # The coordinator LLM calling it is the signal to route to the planner;
    # coordinator_node inspects the tool call's arguments (notably `locale`).
    # NOTE: the docstring above is the tool description shown to the LLM, so
    # it must stay in English like the argument descriptions.
    return

def background_investigation_node(state: State) -> Command[Literal["planner"]]:
    """Collect background information for the user's latest request.

    Uses the latest message content as the search query. When Tavily is the
    selected engine, its list results are reduced to title/content pairs;
    any other engine falls back to the default web search tool. The results
    are JSON-serialized into the state and control moves to the planner.
    """
    logger.info("background investigation node is running.")
    query = state["messages"][-1].content

    if SELECTED_SEARCH_ENGINE == SearchEngine.TAVILY:
        raw = LoggedTavilySearch(max_results=SEARCH_MAX_RESULTS).invoke(
            {"query": query}
        )
        if isinstance(raw, list):
            # Keep only the fields the planner needs from each hit.
            results = [
                {"title": item["title"], "content": item["content"]}
                for item in raw
            ]
        else:
            # Anything but a list is unexpected; record nothing (serialized as null).
            logger.error(f"Tavily search returned malformed response: {raw}")
            results = None
    else:
        results = web_search_tool.invoke(query)

    return Command(
        update={
            "background_investigation_results": json.dumps(
                results, ensure_ascii=False
            )
        },
        goto="planner",
    )

def planner_node(
    state: State, config: RunnableConfig
) -> Command[Literal["human_feedback", "reporter", "__end__"]]:
    """Generate a full execution plan for the user's request.

    Flow:
    1. Stop early (go to reporter) once the plan-iteration budget is spent.
    2. Build the planner prompt, injecting background-investigation results
       on the first iteration.
    3. Invoke the planner LLM — structured JSON output for the "basic"
       model, streamed chunks otherwise — and parse the plan JSON.
    4. Route to the reporter when the plan already has enough context,
       otherwise hand it to human feedback for review.

    Returns:
        Command routing to "human_feedback", "reporter", or "__end__"
        (the "__end__" route was missing from the original annotation).
    """
    logger.info("Planner generating full plan")
    configurable = Configuration.from_runnable_config(config)
    # A falsy stored value (None/0) counts as zero iterations.
    plan_iterations = state.get("plan_iterations") or 0

    # Iteration budget check hoisted to the top so no prompt/LLM setup work
    # is wasted when we are already over the limit.
    if plan_iterations >= configurable.max_plan_iterations:
        return Command(goto="reporter")

    messages = apply_prompt_template("planner", state, configurable)

    # Background investigation results are only relevant on the first pass.
    if (
        plan_iterations == 0
        and state.get("enable_background_investigation")
        and state.get("background_investigation_results")
    ):
        messages += [
            {
                "role": "user",
                "content": (
                    "background investigation results of user query:\n"
                    + state["background_investigation_results"]
                    + "\n"
                ),
            }
        ]

    # Single branch on the planner model type (originally tested twice):
    # "basic" supports structured JSON output, others are streamed.
    full_response = ""
    if AGENT_LLM_MAP["planner"] == "basic":
        llm = get_llm_by_type(AGENT_LLM_MAP["planner"]).with_structured_output(
            Plan,
            method="json_mode",
        )
        response = llm.invoke(messages)
        full_response = response.model_dump_json(indent=4, exclude_none=True)
    else:
        llm = get_llm_by_type(AGENT_LLM_MAP["planner"])
        for chunk in llm.stream(messages):
            full_response += chunk.content

    try:
        curr_plan = json.loads(repair_json_output(full_response))
    except json.JSONDecodeError:
        logger.warning("Planner response is not a valid JSON")
        # A broken plan after a successful iteration still yields a report;
        # a broken first plan terminates the workflow.
        if plan_iterations > 0:
            return Command(goto="reporter")
        return Command(goto="__end__")

    if curr_plan.get("has_enough_context"):
        # Plan is complete as-is: validate it and skip human review.
        logger.info("Planner response has enough context.")
        new_plan = Plan.model_validate(curr_plan)
        return Command(
            update={
                "messages": [AIMessage(content=full_response, name="planner")],
                "current_plan": new_plan,
            },
            goto="reporter",
        )
    # Otherwise the raw plan text goes to the user for feedback.
    return Command(
        update={
            "messages": [AIMessage(content=full_response, name="planner")],
            "current_plan": full_response,
        },
        goto="human_feedback",
    )

def human_feedback_node(
    state: State,
) -> Command[Literal["planner", "research_team", "reporter", "__end__"]]:
    """Handle the user's review of the generated plan.

    Unless the plan is auto-accepted, the graph is interrupted so the user
    can either send the plan back for editing (feedback starting with
    "[EDIT_PLAN]") or accept it ("[ACCEPTED]"); any other feedback raises
    TypeError. An accepted plan is parsed and validated, the iteration
    counter incremented, and the workflow continues to the research team —
    or straight to the reporter when the plan already has enough context.

    Note: `state: State` annotation added for consistency with the other
    node functions in this module.
    """
    current_plan = state.get("current_plan", "")
    auto_accepted_plan = state.get("auto_accepted_plan", False)

    if not auto_accepted_plan:
        feedback = interrupt("Please Review the Plan.")

        if feedback and str(feedback).upper().startswith("[EDIT_PLAN]"):
            # Route the edit request back to the planner for another round.
            return Command(
                update={"messages": [HumanMessage(content=feedback, name="feedback")]},
                goto="planner",
            )
        elif feedback and str(feedback).upper().startswith("[ACCEPTED]"):
            logger.info("Plan is accepted by user.")
        else:
            raise TypeError(f"Interrupt value of {feedback} is not supported.")

    # A falsy stored value (None/0) counts as zero iterations.
    plan_iterations = state["plan_iterations"] if state.get("plan_iterations", 0) else 0
    goto = "research_team"

    try:
        current_plan = repair_json_output(current_plan)
        # Accepting the plan counts as a completed iteration.
        plan_iterations += 1
        new_plan = json.loads(current_plan)
        if new_plan["has_enough_context"]:
            goto = "reporter"
    except json.JSONDecodeError:
        logger.warning("Planner response is not a valid JSON")
        if plan_iterations > 0:
            return Command(goto="reporter")
        else:
            return Command(goto="__end__")

    return Command(
        update={
            "current_plan": Plan.model_validate(new_plan),
            "plan_iterations": plan_iterations,
            "locale": new_plan["locale"],
        },
        goto=goto,
    )

def coordinator_node(
    state: State,
) -> Command[Literal["planner", "background_investigator", "__end__"]]:
    """Talk to the user and decide whether the task needs planning.

    The coordinator LLM is bound to `handoff_to_planner`; a call to that tool
    signals that planning should proceed (via background investigation first
    when enabled), and the tool call's `locale` argument, if present,
    overrides the stored locale. Without any tool call the workflow ends.
    """
    logger.info("Coordinator talking.")
    messages = apply_prompt_template("coordinator", state)
    llm = get_llm_by_type(AGENT_LLM_MAP["coordinator"]).bind_tools(
        [handoff_to_planner]
    )
    response = llm.invoke(messages)

    goto = "__end__"
    locale = state.get("locale", "en-US")

    if response.tool_calls:
        goto = (
            "background_investigator"
            if state.get("enable_background_investigation")
            else "planner"
        )
        try:
            # Take the locale from the first handoff_to_planner call that set one.
            for call in response.tool_calls:
                if call.get("name", "") != "handoff_to_planner":
                    continue
                if detected := call.get("args", {}).get("locale"):
                    locale = detected
                    break
        except Exception as e:
            logger.error(f"Error processing tool calls: {e}")
    else:
        logger.warning("Coordinator response contains no tool calls. Terminating workflow execution.")

    return Command(update={"locale": locale}, goto=goto)

def reporter_node(state: State):
    """Write the final report from the current plan and observations.

    Builds the reporter prompt from the plan's title and thought, appends a
    formatting reminder plus one message per collected observation, invokes
    the reporter LLM, and returns its content as the final report.
    """
    logger.info("Reporter write final report")
    current_plan = state.get("current_plan")
    input_ = {
        "messages": [
            HumanMessage(
                f"# Research Requirements\n\n## Task\n\n{current_plan.title}\n\n## Description\n\n{current_plan.thought}"
            )
        ],
        "locale": state.get("locale", "en-US"),
    }
    invoke_messages = apply_prompt_template("reporter", input_)

    # Remind the model of the required report structure.
    invoke_messages.append(
        HumanMessage(
            content="IMPORTANT: Structure your report according to the format in the prompt...",
            name="system",
        )
    )

    # Each research observation becomes its own message.
    invoke_messages.extend(
        HumanMessage(
            content=f"Below are some observations for the research task:\n\n{observation}",
            name="observation",
        )
        for observation in state.get("observations", [])
    )

    response = get_llm_by_type(AGENT_LLM_MAP["reporter"]).invoke(invoke_messages)
    logger.info(f"reporter response: {response.content}")

    return {"final_report": response.content}

def research_team_node(
    state: State,
) -> Command[Literal["planner", "researcher", "coder"]]:
    """Dispatch the first unexecuted plan step to the matching agent."""
    logger.info("Research team is collaborating on tasks.")
    current_plan = state.get("current_plan")
    # No plan, or an empty one: nothing to dispatch — replan.
    if not current_plan or not current_plan.steps:
        return Command(goto="planner")
    # Every step already has a result — hand control back to the planner.
    if all(step.execution_res for step in current_plan.steps):
        return Command(goto="planner")
    # Guaranteed to exist by the all() check above.
    pending = next(step for step in current_plan.steps if not step.execution_res)
    if pending.step_type and pending.step_type == StepType.RESEARCH:
        return Command(goto="researcher")
    if pending.step_type and pending.step_type == StepType.PROCESSING:
        return Command(goto="coder")
    return Command(goto="planner")

async def _execute_agent_step(state: State, agent, agent_name: str) -> Command[Literal["research_team"]]:
    """Run *agent* on the first unexecuted step of the current plan.

    Builds the agent's input from the findings of already-completed steps
    plus the current step's title, description, and locale (researchers
    additionally get a no-inline-citations reminder), awaits the agent,
    records its response both as the step's execution result and as a new
    observation, then returns control to the research team node.

    Args:
        state: Current workflow state (plan, observations, locale).
        agent: Agent instance to execute the step.
        agent_name: Agent key, e.g. "researcher" or "coder".

    Returns:
        Command updating messages/observations and routing to "research_team".
    """
    current_plan = state.get("current_plan")
    observations = state.get("observations", [])

    # Partition the plan into finished steps and the first pending one.
    current_step = None
    completed_steps = []
    for step in current_plan.steps:
        if step.execution_res:
            completed_steps.append(step)
        else:
            current_step = step
            break

    if not current_step:
        logger.warning("No unexecuted step found")
        return Command(goto="research_team")

    # Summarize earlier findings so the agent can build on them.
    completed_steps_info = ""
    if completed_steps:
        findings = [
            f"## Existing Finding {idx + 1}: {done.title}\n\n"
            f"<finding>\n{done.execution_res}\n</finding>\n\n"
            for idx, done in enumerate(completed_steps)
        ]
        completed_steps_info = "# Existing Research Findings\n\n" + "".join(findings)

    agent_input = {
        "messages": [
            HumanMessage(
                content=f"{completed_steps_info}# Current Task\n\n## Title\n\n{current_step.title}\n\n## Description\n\n{current_step.description}\n\n## Locale\n\n{state.get('locale', 'en-US')}"
            )
        ]
    }

    # Researchers must not emit inline citations.
    if agent_name == "researcher":
        agent_input["messages"].append(
            HumanMessage(
                content="IMPORTANT: DO NOT include inline citations...",
                name="system",
            )
        )

    result = await agent.ainvoke(input=agent_input)
    response_content = result["messages"][-1].content

    # Persist the result on the step so research_team_node skips it next time.
    current_step.execution_res = response_content
    logger.info(f"Step '{current_step.title}' execution completed by {agent_name}")

    return Command(
        update={
            "messages": [HumanMessage(content=response_content, name=agent_name)],
            "observations": observations + [response_content],
        },
        goto="research_team",
    )

async def _setup_and_execute_agent_step(
    state: State,
    config: RunnableConfig,
    agent_type: str,
    default_agent,
    default_tools: list,
) -> Command[Literal["research_team"]]:
    """Set up the agent for a step (with MCP tools when configured) and run it.

    Reads MCP settings from the runnable config. When any MCP server exposes
    tools for this agent type, a fresh agent is built with *default_tools*
    plus the enabled MCP tools; otherwise *default_agent* is used as-is.

    Args:
        state: Current workflow state.
        config: Runnable config carrying the `Configuration` (MCP settings).
        agent_type: Agent key, e.g. "researcher" or "coder".
        default_agent: Pre-built agent used when no MCP servers apply.
        default_tools: Baseline tools for a freshly created agent.
    """
    configurable = Configuration.from_runnable_config(config)
    mcp_servers = {}
    enabled_tools = {}  # tool name -> name of the server providing it

    # Collect the MCP servers whose tools should be added for this agent type.
    if configurable.mcp_settings:
        for server_name, server_config in configurable.mcp_settings["servers"].items():
            if (server_config["enabled_tools"] and 
                agent_type in server_config["add_to_agents"]):
                mcp_servers[server_name] = {
                    k: v
                    for k, v in server_config.items()
                    if k in ("transport", "command", "args", "url", "env")
                }
                for tool_name in server_config["enabled_tools"]:
                    enabled_tools[tool_name] = server_name

    if mcp_servers:
        async with MultiServerMCPClient(mcp_servers) as client:
            loaded_tools = default_tools[:]
            # Loop variable renamed from `tool`, which shadowed the module-level
            # `tool` decorator imported from langchain_core.tools.
            for mcp_tool in client.get_tools():
                if mcp_tool.name in enabled_tools:
                    mcp_tool.description = f"Powered by '{enabled_tools[mcp_tool.name]}'.\n{mcp_tool.description}"
                    loaded_tools.append(mcp_tool)
            agent = create_agent(agent_type, agent_type, loaded_tools, agent_type)
            return await _execute_agent_step(state, agent, agent_type)
    else:
        return await _execute_agent_step(state, default_agent, agent_type)

async def researcher_node(state: State, config: RunnableConfig) -> Command[Literal["research_team"]]:
    """Execute the next research step with the researcher agent.

    Thin wrapper around `_setup_and_execute_agent_step` that supplies the
    researcher agent and its default tools (web search and crawling).
    """
    logger.info("Researcher node is researching.")
    return await _setup_and_execute_agent_step(
        state=state,
        config=config,
        agent_type="researcher",
        default_agent=research_agent,
        default_tools=[web_search_tool, crawl_tool],
    )

async def coder_node(state: State, config: RunnableConfig) -> Command[Literal["research_team"]]:
    """Execute the next processing step with the coder agent.

    Thin wrapper around `_setup_and_execute_agent_step` that supplies the
    coder agent and its default tool (a Python REPL).
    """
    logger.info("Coder node is coding.")
    return await _setup_and_execute_agent_step(
        state=state,
        config=config,
        agent_type="coder",
        default_agent=coder_agent,
        default_tools=[python_repl_tool],
    )
