import sys
import os
import ollama
import json
import fitz  # PyMuPDF
from typing import List, Dict, Union, Any, Optional, Callable

try:
    # 尝试从上层目录的 base_code 导入
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.append(parent_dir)
    from base_code import *
    from base_code.common import *
    from base_code.llm_client import ConfigurationError, LLMClient
except ImportError:
    print("❌ 错误: 无法找到 'base_code' 模块。请确保 pocketflow 代码位于正确的路径。")

from local_tools import *  # Global Tools
from prompt import *       # Prompt Manager
from context_engineering import ContextEngine, ReasoningStep
from memory_monitor import start_monitor_server, record_memory_event



# ==============================================================================
#  Agent记忆：包含了个体记忆&上下文的引用
# ==============================================================================
class AgentMemory:
    """
    存储 Agent 工作流中所有状态的数据类。
    增强版本：集成上下文工程功能
    """

    def __init__(self):
        self.raw_input: Optional[str] = None
        self.goal: Optional[str] = None
        self.next_goal: Optional[str] = None
        self.history: List[tuple] = []  # (thought, action, observation)

        self.plan: Optional[List[Dict]] = None  # 存储完整的计划
        self.current_step_id: Optional[int] = None  # 当前正在执行的步骤ID

        # Current step's data
        self.current_thought: Optional[str] = None
        self.current_action: Optional[Dict] = None
        self.current_observation: Any = None

        self.final_answer: Optional[str] = None
        self.error_message: Optional[str] = None

        # Control flags and counters
        self.iteration_count: int = 0
        self.max_iteration: int = 10   # 防止无限循环

        # 上下文工程相关
        self.context_engine: ContextEngine = ContextEngine()
        self.reasoning_steps: List[ReasoningStep] = []  # 用于上下文工程

    def reset_workflow_data(self):
        """在每次运行前重置临时数据。"""
        self.raw_input = None
        self.goal = None
        self.history = []
        self.plan = None
        self.current_step_id = None
        self.current_thought = None
        self.current_action = None
        self.current_observation = None
        self.final_answer = None
        self.error_message = None
        self.iteration_count = 0
        self.reasoning_steps = []  # 重置推理步骤


    # --- Getter 方法
    def get_current_step_details(self) -> Optional[Dict]:
        """
        根据 current_step_id 获取当前步骤的详细信息。这是一个纯粹的读取操作。
        """
        if not self.plan or self.current_step_id is None:
            return None

        for step in self.plan:
            if step.get('step_id') == self.current_step_id:
                return step
        return None

    def get_step_by_id(self, step_id: int) -> Optional[Dict]:
        """
        通过ID获取计划中的任意步骤，用于依赖分析等。
        """
        if not self.plan:
            return None
        for step in self.plan:
            if step.get('step_id') == step_id:
                return step
        return None

    # --- Setter 方法 (有副作用，由主流程控制) ---

    def set_plan(self, plan: List[Dict]):
        """设置计划并初始化起始步骤。"""
        self.plan = plan
        # 自动将当前步骤设置为计划的第一个未完成步骤，通常是1
        if self.plan and len(self.plan) > 0:
            self.current_step_id = self.plan[0].get('step_id', 1)
        else:
            self.current_step_id = None

    def advance_to_step(self, step_id: int):
        """由主流程调用，明确地将工作流推进到指定步骤。"""
        self.current_step_id = step_id
        # 重置当前循环的临时数据
        self.current_thought = None
        self.current_action = None
        # self.current_observation = None

    def add_reasoning_step(self, thought: str, action: str, result: str, success: bool = True):
        """添加推理步骤用于上下文分析"""
        step = ReasoningStep(thought, action, result, success)
        self.reasoning_steps.append(step)

    def get_compressed_context(self) -> Dict[str, Any]:
        """获取压缩后的上下文信息"""
        return self.context_engine.get_compressed_context(
            self.goal or "",
            self.reasoning_steps
        )

    def process_task_completion(self, success: bool) -> Dict[str, Any]:
        """处理任务完成，提取经验教训"""
        return self.context_engine.process_task(self.reasoning_steps, success)

    def add_history(self, thought: str, action: dict, observation: any):
        """
        向历史记录中添加一轮 ReAct 循环。
        修正了原代码中每次调用都重置history的bug。
        """
        # 确保 observation 是字符串，以便统一处理
        observation_str = json.dumps(observation, ensure_ascii=False) if isinstance(observation, (dict, list)) else str(
            observation)
        self.history.append((thought, action, observation_str))


# ==============================================================================
#  工作流节点定义 (Nodes)
# ==============================================================================
class PerceptionNode(AsyncNode):
    """感知节点：分析用户原始输入，确立核心目标 (Goal)。"""
    def __init__(self, prompt_manager: PromptManager, ollama_tool: Callable):
        super().__init__()
        self.prompt_manager = prompt_manager
        self.ollama_tool = ollama_tool

    async def prep_async(self,shared):
        return shared

    async def exec_async(self, shared: AgentMemory):
        print("\n🤔 [Perception] 分析用户意图中...")
        try:
            goal = await self.ollama_tool(
                system_prompt=self.prompt_manager.get_perception_prompt(),
                user_prompt=shared.raw_input,
                as_json=False
            )
            shared.goal = goal.strip()
            print(f"🎯 确立目标: {shared.goal}")
            return "success"
        except Exception as e:
            shared.error_message = f"感知阶段出错: {e}"
            return "error"

    async def post_async(self,shared,prep_res,exec_res):
        return exec_res


class PlanningNode(AsyncNode):
    """感知节点：分析用户原始输入，确立核心目标 (Goal)。"""

    def __init__(self, prompt_manager: PromptManager, llm_tool: Callable, registry: CommandRegistry):
        super().__init__()
        self.prompt_manager = prompt_manager
        self.llm_tool = llm_tool
        self.registry = registry

    async def prep_async(self, shared):
        return shared

    async def exec_async(self, shared: AgentMemory):
        print("\n🤔 [Planning] 制定计划中...")
        try:
            system_prompt = self.prompt_manager.get_planning_prompt(shared.goal, self.registry.get_tools_description())
            plan = await self.llm_tool(
                system_prompt=system_prompt,
                user_prompt="",
                as_json=True
            )
            shared.set_plan(plan["plan"])
            print(f"🎯 确立目标: {self.steps_to_human_readable(shared.plan)}")
            return "success"
        except Exception as e:
            shared.error_message = f"计划阶段出错: {e}"
            print( shared.error_message )
            return "error"

    async def post_async(self, shared, prep_res, exec_res):
        return exec_res

    def steps_to_human_readable(self, steps: list[dict]) -> str:
        """
        将结构化步骤列表转换为人类可读的字符串。
        """
        lines = ["任务步骤说明：\n"]
        for step in steps:
            step_id = step.get("step_id")
            desc = step.get("description", "")
            tools = ", ".join(step.get("estimated_tools", [])) or "无"
            deps = step.get("dependencies", [])
            deps_str = "、".join(map(str, deps)) if deps else "无"

            lines.append(f"{step_id}. 步骤 {step_id}")
            lines.append(f"   - 描述：{desc}")
            lines.append(f"   - 使用工具：{tools}")
            lines.append(f"   - 前置依赖：{deps_str}\n")

        return "\n".join(lines)


class DeliberationNode(AsyncNode):
    """决策节点 (Reason): ReAct 循环的核心，负责思考并决定下一步行动。"""

    def __init__(self, prompt_manager: PromptManager, ollama_tool: Callable, registry: CommandRegistry):
        super().__init__()
        self.prompt_manager = prompt_manager
        self.ollama_tool = ollama_tool
        self.registry = registry

    async def prep_async(self,shared):
        return shared

    async def exec_async(self, shared: AgentMemory):
        print("\n🤔 [Deliberation] 思考下一步行动...")
        if len(shared.history) >= shared.max_iteration:
            shared.error_message = "已达到最大思考步数，任务终止。"
            return "error"

        # 获取压缩后的上下文信息
        compressed_context = shared.get_compressed_context()

        # 构建增强的提示词
        tools_desc = self.registry.get_tools_description()

        # 使用压缩后的历史和经验提示
        enhanced_history = compressed_context["compressed_history"]
        experience_hint = compressed_context["hint"]

        system_prompt = self.prompt_manager.get_deliberation_prompt(
            shared.goal,
            json.dumps(shared.plan, ensure_ascii=False, indent=2),
            shared.get_current_step_details(),
            shared.history,
            tools_desc,
            experience_hint    # 添加经验提示
        )

        try:
            response = await self.ollama_tool(
                system_prompt=system_prompt,
                user_prompt="请生成下一步的思考和行动。",
                as_json=True
            )

            shared.current_thought = response.get("thought", "没有产生有效思考。")
            shared.current_action = response.get("action")

            print(f"  - 思考: {shared.current_thought}")

            if not shared.current_action or "name" not in shared.current_action:
                raise ValueError("LLM 返回的 action 格式不正确。")

            action_name = shared.current_action["name"]
            print(f"  - 计划行动: {shared.current_action}")

            if action_name == "finish":
                shared.final_answer = shared.current_action.get("args", {}).get("answer", "任务完成，但未提供明确答案。")
                return "finish"

            return "execute_tool"

        except Exception as e:
            shared.error_message = f"决策阶段出错: {e}"
            return "error"

    async def post_async(self, shared, prep_res, exec_res):
        return exec_res


class ToolExecutionNode(AsyncNode):
    """行动节点 (Act): 执行由 DeliberationNode 决定的工具。"""

    def __init__(self, registry: CommandRegistry):
        super().__init__()
        self.registry = registry

    async def prep_async(self,shared):
        return shared

    async def exec_async(self, shared: AgentMemory):
        action = shared.current_action
        tool_name = action["name"]
        tool_args = action.get("args", {})

        print(f"\n🛠️ [Execution] 执行工具: {tool_name} (参数: {tool_args})")

        try:
            tool_func = self.registry.get_tool(tool_name)

            # 针对llm_tool 特殊处理，避免生成非常长的text参数
            if tool_name == "llm_tool":
                # 提取第一个参数（例如 text）
                if "task" not in tool_args:
                    tool_args["task"] = shared.get_current_step_details()["description"]

                tool_args["text"] = shared.current_observation

            # 异步执行工具
            maybe_coro = tool_func(**tool_args)
            if asyncio.iscoroutine(maybe_coro):
                result = await maybe_coro
            else:
                result = maybe_coro

            shared.current_observation = str(result)
            print(f"  - 观察结果: \n{str(result)}")  # 打印结果
            return "success"
        except KeyError:
            shared.current_observation = f"错误: 工具 '{tool_name}' 未找到。"
            return "success"  # 返回 success 以便 Judgment 节点记录错误并继续循环
        except Exception as e:
            shared.current_observation = f"错误: 执行工具 '{tool_name}' 时发生异常: {e}"
            return "success"  # 同上

    async def post_async(self, shared, prep_res, exec_res):
        return exec_res


class JudgmentNode(AsyncNode):
    """判断节点：评估工具执行结果，并将其计入历史和上下文工程。"""

    def __init__(self, prompt_manager: PromptManager, llm_tool: Callable, registry: CommandRegistry):
        super().__init__()
        self.prompt_manager = prompt_manager
        self.llm_tool = llm_tool
        self.registry = registry

    async def prep_async(self, shared):
        return shared

    async def exec_async(self, shared: AgentMemory):
        print("\n⚖️ [Judgment] 评估结果并更新历史...")

        # 记录到标准历史
        # 这是执行判断前必须完成的步骤，确保历史是完整的
        if shared.current_thought is not None and shared.current_action is not None:
            shared.add_history(shared.current_thought, shared.current_action, shared.current_observation)

            # (可选) 为上下文工程记录推理步骤
            action_name = shared.current_action.get("name", "unknown")
            obs_str = str(shared.current_observation)
            success = not ("错误" in obs_str or "失败" in obs_str or "error" in obs_str.lower())
            shared.add_reasoning_step(
                thought=shared.current_thought,
                action=action_name,
                result=obs_str,
                success=success
            )

        # 构建用于决策的Prompt
        # 传入完整的上下文，包括历史记录
        current_step_details = shared.get_current_step_details()
        if not current_step_details:
            print("❌ [Judgment] 错误：无法获取当前步骤详情，流程无法继续。")
            shared.error_message = "内部错误：计划执行中丢失了当前步骤信息。"
            return "finish"  # 终止流程

        eval_prompt = self.prompt_manager.get_judgment_prompt(
            goal=shared.goal,
            full_plan=json.dumps(shared.plan, ensure_ascii=False, indent=2),
            cur_step=current_step_details,
            last_observation=str(shared.current_observation),
            history=shared.history,
            experience_hint=shared.get_compressed_context().get("hint", "")  # 假设存在
        )

        # 历史压缩（用于上下文判断）
        short_history = shared.context_engine.curator.get_recent_history_text() \
            if hasattr(shared.context_engine.curator, "get_recent_history_text") else ""

        # 调用 LLM 进行结构化决策
        try:
            # user_prompt 设置为一个简单的指令，因为所有上下文都在 system_prompt 中
            llm_response = await self.llm_tool(
                system_prompt=eval_prompt,
                user_prompt="请根据你作为决策核心的角色，严格按照指定的JSON格式输出你的决策。",
                as_json=True
            )
            print(f"🧠 [Judgment] LLM 决策: {json.dumps(llm_response, ensure_ascii=False)}")

        except Exception as e:
            print(f"⚠️ [Judgment] 调用 LLM 出错: {e}. 流程将重试当前步骤。")
            # 发生LLM调用错误时，一个稳健的选择是重试当前步骤
            return "continue"

            # 解析LLM的决策并更新智能体状态 (核心逻辑)
        if not llm_response or "decision" not in llm_response:
            print("⚠️ [Judgment] LLM 返回无效或不完整的决策。流程将重试当前步骤。")
            return "continue"

        decision = llm_response.get("decision")
        reasoning = llm_response.get("reasoning", "无")

        if decision == "CURRENT_STEP_COMPLETE":
            next_step_id = llm_response.get("next_step_id")
            if next_step_id is not None:
                print(f"✅ [Judgment] 步骤 {shared.current_step_id} 完成。理由: {reasoning}. 推进到步骤 {next_step_id}.")
                shared.advance_to_step(next_step_id)
                return "continue"
            else:
                # 如果 next_step_id 为 null，说明这是计划的最后一步
                print(f"✅ [Judgment] 计划的最后一步 {shared.current_step_id} 已完成。任务即将结束。")
                # 尝试从 observation 中获取最终答案，或设置一个默认完成信息
                shared.final_answer = shared.current_observation
                return "finish"

        elif decision == "OVERALL_GOAL_COMPLETE":
            print(f"🏁 [Judgment] 整体目标提前完成！理由: {reasoning}.")
            shared.final_answer = shared.current_observation
            return "finish"

        elif decision == "RETRY_STEP":
            print(f"🔄 [Judgment] 决定重试步骤 {shared.current_step_id}。理由: {reasoning}.")
            # 无需改变 current_step_id，循环将自然地重试
            # (高级) 可以将 reasoning 存入 shared.retry_reason，供下一轮 Deliberation 参考
            return "continue"

        elif decision == "PLAN_FAILURE":
            shared.error_message = f"计划执行失败。步骤 {shared.current_step_id} 遇到无法解决的问题。原因: {reasoning}"
            print(f"❌ [Judgment] 计划失败！{shared.error_message}")
            return "finish"

        else:
            print(f"⚠️ [Judgment] LLM 返回未知的决策类型 '{decision}'。为避免卡死，流程将终止。")
            shared.error_message = f"判断节点收到未知的决策指令: {decision}"
            return "finish"

    async def post_async(self, shared, prep_res, exec_res):
        print(f"exec_res: {exec_res}")
        return exec_res


class FinalizeNode(AsyncNode):
    """终结节点：在流程结束时整理并设置最终答案，同时处理上下文工程。"""

    def __init__(self, prompt_manager: PromptManager, ollama_tool: Callable):
        super().__init__()
        self.prompt_manager = prompt_manager
        self.ollama_tool = ollama_tool

    async def prep_async(self,shared):
        return shared

    async def exec_async(self, shared: AgentMemory):
        print("\n✅ [Finalize] 任务流程结束。")

        if shared.error_message:
            shared.final_answer = f"❌ 任务失败: {shared.error_message}"
            success = False
        elif not shared.final_answer:
            shared.final_answer = "ℹ️ 任务流程已完成，但没有生成最终答案。"
            success = False
        else:
            success = True

        # 完成最后整理
        # final_prompt = self.prompt_manager.get_finalize_prompt(
        #     goal=shared.goal,
        #     final_result=shared.final_answer
        # )
        #
        # print(f"eval_prompt: {final_prompt}")

        # 历史压缩（用于上下文判断）
        short_history = shared.context_engine.curator.get_recent_history_text() \
            if hasattr(shared.context_engine.curator, "get_recent_history_text") else ""

        user_prompt = "不要包含任何额外的解释或文本。"

        # 调用 LLM 判断任务进度
        try:
            # llm_response = await self.ollama_tool(
            #     system_prompt=final_prompt,
            #     user_prompt=user_prompt,
            #     as_json=False
            # )
            print(shared.final_answer)
        except Exception as e:
            print(f"⚠️ FinalizeNode 调用 LLM 出错: {e}")
            return


# ==============================================================================
#  ReActAgent (基于工作流的实现)
# ==============================================================================
class ReActAgent(AsyncFlow):
    """
    一个基于 pocketflow 工作流的 ReAct Agent。
    其内部逻辑由一个显式的“思考-行动-判断”循环构成。
    """

    def __init__(self, tools_system, local_llm="qwen3:4b", remote_llm="qwen3-plus", enable_monitoring=True, monitor_port=8000):
        super().__init__()

        # 1. 初始化核心组件
        self.registry = tools_system
        self.prompts = PromptManager()
        self.memory = AgentMemory()
        self.local_llm = local_llm
        self.remote_llm = remote_llm
        self.enable_monitoring = enable_monitoring
        self.monitor_port = monitor_port
        self.monitor_server = None

        async def _internal_ollama_call(system_prompt: str, user_prompt: str, as_json: bool = True):
            content = None
            try:
                response = await ollama.AsyncClient().chat(
                    model=self.local_llm,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": user_prompt},
                    ],
                    format="json" if as_json else None
                )
                content = response["message"]["content"]
                if as_json:
                    # <--- 建议: 增强 JSON 解析的鲁棒性
                    return json.loads(content)
                return content
            except json.JSONDecodeError as e:
                raise ValueError(f"LLM 未能返回有效的 JSON 格式。内容: '{content}'") from e
            except Exception as e:
                raise e

        async def _internal_llm_call(system_prompt: str, user_prompt: str, as_json: bool = True):
            """
            一个集成了LLMClient的内部LLM调用函数。
            这就是您想要的，一个干净的集成点。
            """
            try:
                # 无状态调用：每次调用时创建一个新的客户端实例，通过 'async with' 管理其生命周期
                async with LLMClient() as client:
                    response = await client.chat(
                        system_prompt=system_prompt,
                        user_prompt=user_prompt,
                        as_json=as_json,
                        # 可以在这里覆盖配置中的参数
                        temperature=0.1
                    )
                    return response
            except Exception as e:
                print(f"Agent的LLM调用失败: {e}")
                raise e

        # 3. 定义工作流节点
        perception = PerceptionNode(self.prompts, _internal_ollama_call)
        planning = PlanningNode(self.prompts, _internal_llm_call, self.registry)
        deliberation = DeliberationNode(self.prompts, _internal_ollama_call, self.registry)
        # deliberation = DeliberationNode(self.prompts, _internal_llm_call, self.registry)  # 使用qwen3
        executor = ToolExecutionNode(self.registry)
        judgment = JudgmentNode(self.prompts, _internal_llm_call, self.registry)
        finalizer = FinalizeNode(self.prompts, _internal_ollama_call)

        # 4. 编排工作流
        self.start(perception)
        perception - "success" >> planning
        perception - "error" >> finalizer  # 感知失败直接结束

        planning - "success" >> deliberation
        planning - "error" >> finalizer

        deliberation - "execute_tool" >> executor
        deliberation - "finish" >> finalizer
        deliberation - "error" >> finalizer

        executor - "success" >> judgment  # 执行后总是进行判断

        judgment - "continue" >> deliberation  # 判断后返回思考，形成 ReAct 循环！
        judgment - "finish" >> finalizer
        judgment - "error" >> finalizer

        print("💡 ReActAgent (Workflow-based) 已初始化。")

    async def process_command_async(self, line: str) -> str:
        """Agent 的异步入口方法，处理用户输入。"""
        self.memory.reset_workflow_data()
        self.memory.raw_input = line

        await self.run_async(self.memory)

        return self.memory.final_answer or "流程结束，但没有最终答案。"

    def show_memo(self, verbose: bool = False):
        """打印当前Agent的记忆、洞察与统计信息（增强版）"""

        print("\n==============================")
        print("🧭 ReActAgent 状态调试面板")
        print("==============================")

        # 0️⃣ 当前状态概览
        print("🎯 当前任务目标:", self.memory.goal or "（未设定）")
        print("🔁 当前迭代次数:", self.memory.iteration_count, "/", self.memory.max_iteration)
        if self.memory.error_message:
            print("⚠️  错误信息:", self.memory.error_message)

        # 当前回合内容
        print("\n🧠 当前思考与执行状态:")
        print("  🪞 Thought:", (self.memory.current_thought or "（无）")[:200])
        print("  🧩 Action:", json.dumps(self.memory.current_action, ensure_ascii=False, indent=2)
        if self.memory.current_action else "（无）")
        print("  👁️  Observation:",
              (str(self.memory.current_observation)[:500] + "...")
              if self.memory.current_observation else "（无观察结果）")

        # 1️⃣ 上下文统计
        print("\n📊 上下文引擎统计:")
        stats = self.memory.context_engine.get_stats()
        print(f"  已处理任务数: {stats['tasks_processed']}")
        print(f"  当前洞察数量: {stats['total_insights']}")
        print(f"  估算记忆压缩率: {stats['memory_saved']}")

        # 2️⃣ 洞察回顾
        print("\n🧠 已提取的洞察 (Insights):")
        insights = self.memory.context_engine.curator.insights
        if not insights:
            print("  （暂无洞察）")
        else:
            for i, (trigger, ins) in enumerate(insights.items(), 1):
                print(f"  {i:02d}. [{trigger}] {ins.lesson} "
                      f"(权重={ins.weight:.1f}, 频次={ins.count})")

        # 3️⃣ 详细历史记录
        print("\n🕒 历史记录 (ReAct 回合):")
        if not self.memory.history:
            print("  （暂无历史记录）")
        else:
            for i, (thought, action, obs) in enumerate(self.memory.history[-5:], 1):
                print(f"  第 {i} 步：")
                print(f"    🧠 Thought: {thought[:200]}")
                print(f"    🧩 Action: {json.dumps(action, ensure_ascii=False)}")
                print(f"    👁️  Observation: {str(obs)[:300]}{'...' if len(str(obs)) > 300 else ''}")
                print("  -----------------------------")

        # 4️⃣ （可选）显示上下文引擎的内部步骤记忆
        if verbose:
            print("\n🪶 步骤压缩记忆 (ContextEngine):")
            steps = getattr(self.memory.context_engine.curator, "step_memory", [])
            if not steps:
                print("  （暂无步骤记忆）")
            else:
                for line in steps[-10:]:
                    print("  ", line)

        print("==============================\n")


