"""React Agent implementation using LangGraph and LangChain with Gemini."""

import asyncio
import json
import os
import re
import time
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
from langchain.schema import HumanMessage, SystemMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, END
from langgraph.graph.state import CompiledStateGraph

from .agent_state import AgentState, AgentMemory, ThoughtActionObservation, create_initial_state
from .adaptive_replanner import AdaptiveReplanner, AdaptationContext
from .executor import PlanExecutor, ExecutionStatus
from .planner import Planner
from .reflection_module import ReflectionModule
from config import Config
from llm.llm_manager import get_llm_manager, safe_llm_invoke
from memory import MemoryStore, ContextManager, VectorMemory, EpisodicMemory
from memory.episodic_memory import Episode
from tools.enhanced_tool_manager import EnhancedToolManager


class ReactAgent:
    """React Agent that implements the Thought-Action-Observation pattern."""
    
    def __init__(self, verbose: bool = True, mode: str = "hybrid", use_mysql: bool = True, 
                 enable_reflection: bool = True, reflection_quality_threshold: float = 0.7):
        """Initialize the agent and its memory, planning and reflection subsystems.

        Args:
            verbose: Print progress/debug information to stdout.
            mode: Workflow topology - "react", "plan_execute", or "hybrid".
            use_mysql: Back the tool manager with MySQL instead of in-memory storage.
            enable_reflection: Run a self-critique pass after the finish node.
            reflection_quality_threshold: Minimum quality score accepted by reflection.
        """
        self.verbose = verbose
        self.mode = mode  # "react", "plan_execute", or "hybrid"
        self.enable_reflection = enable_reflection
        
        # MySQL connection settings come from the environment when available.
        # The previous hard-coded root/root values remain as fallbacks so
        # existing local setups keep working, but real credentials should be
        # supplied via MYSQL_USER / MYSQL_PASSWORD instead of source code.
        mysql_config = {
            "host": os.environ.get("MYSQL_HOST", "localhost"),
            "port": int(os.environ.get("MYSQL_PORT", "3306")),
            "database": os.environ.get("MYSQL_DATABASE", "react_agent_db"),
            "user": os.environ.get("MYSQL_USER", "root"),
            "password": os.environ.get("MYSQL_PASSWORD", "root"),
        }
        
        # Use EnhancedToolManager with MySQL support
        self.tool_manager = EnhancedToolManager(use_mysql=use_mysql, mysql_config=mysql_config)
        
        if self.verbose:
            print(f"🔧 Initialized ReactAgent with {'MySQL' if use_mysql else 'in-memory'} database")
            if use_mysql:
                db_status = self.tool_manager.get_database_status()
                print(f"📊 Database Status: {db_status}")
        
        # Enhanced memory system: key-value store, vector store, episodic
        # recall, and a context manager that ties them to a session.
        self.memory_store = MemoryStore()
        self.vector_memory = VectorMemory()
        self.episodic_memory = EpisodicMemory(self.memory_store, self.vector_memory)
        self.context_manager = ContextManager(self.memory_store, self.episodic_memory)
        
        # Planner and executor for the plan-execute path
        self.planner = Planner(self.memory_store)
        self.executor = PlanExecutor(self.tool_manager, self.context_manager)
        
        # Adaptive replanner used by the hybrid mode
        self.adaptive_replanner = AdaptiveReplanner(self.planner, self.tool_manager, self.context_manager)
        
        # Reflection module (None disables the reflect node entirely)
        self.reflection_module = ReflectionModule(
            quality_threshold=reflection_quality_threshold,
            max_refinement_iterations=3,
            verbose=self.verbose
        ) if enable_reflection else None
        
        # Legacy memory for backward compatibility
        self.memory = AgentMemory()
        
        # LLM manager hands out a per-session LLM in run()
        self.llm_manager = get_llm_manager()
        self.llm = None  # Will be set per session
        
        # Compile the LangGraph workflow for the selected mode
        self.graph = self._create_graph()

    def workflow_image(self):
        """Render the compiled workflow graph as a Mermaid-generated PNG."""
        return self.graph.get_graph().draw_mermaid_png()

    def _create_graph(self) -> CompiledStateGraph:
        """Create the LangGraph state graph for the React Agent.

        The topology depends on ``self.mode``:
          - "plan_execute": decide -> (simple | plan->execute->evaluate | ReAct)
          - "hybrid": same, plus an adaptive-replanning loop after evaluation
          - anything else: plain ReAct (think -> act -> observe -> think)
        All modes share the ReAct cycle and the optional finish -> reflect tail.
        """
        workflow = StateGraph(AgentState)
        
        if self.mode == "plan_execute":
            self._build_plan_execute_graph(workflow)
        elif self.mode == "hybrid":
            self._build_hybrid_graph(workflow)
        else:
            self._build_react_graph(workflow)
        
        # Compile with an in-memory checkpointer so runs are resumable by thread_id
        memory = MemorySaver()
        return workflow.compile(checkpointer=memory)
    
    def _add_react_nodes(self, workflow) -> None:
        """Register the core ReAct nodes (think/act/observe/finish, optional reflect)."""
        workflow.add_node("think", self._think_node)
        workflow.add_node("act", self._act_node)
        workflow.add_node("observe", self._observe_node)
        workflow.add_node("finish", self._finish_node)
        if self.enable_reflection:
            workflow.add_node("reflect", self._reflect_node)
    
    def _wire_react_cycle(self, workflow) -> None:
        """Wire the think -> act -> observe -> think loop with a finish exit."""
        workflow.add_conditional_edges(
            "think",
            self._should_continue_after_think,
            {
                "act": "act",
                "finish": "finish"
            }
        )
        workflow.add_edge("act", "observe")
        workflow.add_edge("observe", "think")
    
    def _wire_finish(self, workflow) -> None:
        """Route finish through the optional reflection pass before END."""
        if self.enable_reflection:
            workflow.add_edge("finish", "reflect")
            workflow.add_edge("reflect", END)
        else:
            workflow.add_edge("finish", END)
    
    def _build_plan_execute_graph(self, workflow) -> None:
        """Plan-Execute mode: plan upfront, fall back to ReAct when unsatisfied."""
        workflow.add_node("decide_approach", self._decide_approach_node)
        workflow.add_node("simple", self._simple_node)
        workflow.add_node("plan", self._plan_node)
        workflow.add_node("execute", self._execute_node)
        workflow.add_node("evaluate_execution", self._evaluate_execution_node)
        self._add_react_nodes(workflow)
        
        workflow.set_entry_point("decide_approach")
        workflow.add_conditional_edges(
            "decide_approach",
            self._route_after_decision,
            {
                "simple": "simple",
                "think": "think",
                "plan": "plan"
            }
        )
        
        # Simple questions are answered directly
        workflow.add_edge("simple", END)
        
        self._wire_react_cycle(workflow)
        
        workflow.add_edge("plan", "execute")
        workflow.add_edge("execute", "evaluate_execution")
        workflow.add_conditional_edges(
            "evaluate_execution",
            self._should_finish_after_evaluation,
            {
                "think": "think",
                "finish": "finish"
            }
        )
        
        self._wire_finish(workflow)
    
    def _build_hybrid_graph(self, workflow) -> None:
        """Hybrid mode: plan-execute with adaptive replanning and ReAct fallback."""
        workflow.add_node("decide_approach", self._decide_approach_node)
        workflow.add_node("simple", self._simple_node)
        workflow.add_node("plan", self._plan_node)
        workflow.add_node("execute", self._execute_node)
        workflow.add_node("evaluate_execution", self._evaluate_execution_node)
        workflow.add_node("adaptive_replan", self._adaptive_replan_node)
        self._add_react_nodes(workflow)
        
        workflow.set_entry_point("decide_approach")
        workflow.add_conditional_edges(
            "decide_approach",
            self._route_after_decision,
            {
                "simple": "simple",
                "plan": "plan",
                "think": "think"
            }
        )
        
        workflow.add_edge("simple", END)
        
        self._wire_react_cycle(workflow)
        
        # Plan-Execute path: every execution is evaluated before finishing
        workflow.add_edge("plan", "execute")
        workflow.add_edge("execute", "evaluate_execution")
        workflow.add_conditional_edges(
            "evaluate_execution",
            self._should_replan_after_evaluation,
            {
                "adaptive_replan": "adaptive_replan",  # Replan if needed
                "think": "think",                      # Fall back to ReAct if plan completed but unsatisfactory
                "finish": "finish"                     # Finish if successful
            }
        )
        
        # Adaptive replanning can retry planning, switch to ReAct, or give up
        workflow.add_conditional_edges(
            "adaptive_replan",
            self._route_after_replan,
            {
                "plan": "plan",      # Execute new plan
                "think": "think",    # Switch to ReAct approach
                "finish": "finish"   # Give up if replanning fails
            }
        )
        
        self._wire_finish(workflow)
    
    def _build_react_graph(self, workflow) -> None:
        """Default ReAct mode: pure think/act/observe loop."""
        self._add_react_nodes(workflow)
        workflow.set_entry_point("think")
        self._wire_react_cycle(workflow)
        self._wire_finish(workflow)
    
    async def run(self, query: str, max_steps: int = None) -> Dict[str, Any]:
        """Run the React Agent on a query.

        Args:
            query: The user question or task.
            max_steps: Reasoning-step budget; defaults to ``Config.MAX_ITERATIONS``.

        Returns:
            Dict with keys ``input``, ``output``, ``steps``, ``success``,
            ``error`` and ``metadata`` (mode, session_id, chosen_approach,
            current_plan, execution_time).
        """
        if max_steps is None:
            max_steps = Config.MAX_ITERATIONS
        
        # Each run gets its own session for context/memory isolation
        session_id = str(uuid.uuid4())
        self.context_manager.start_session(session_id, query)
        
        # Get LLM instance for this session
        self.llm = self.llm_manager.get_llm_for_session(session_id)
        
        # Create initial state
        initial_state = create_initial_state(query, max_steps)
        initial_state["session_id"] = session_id
        initial_state["mode"] = self.mode
        
        config = {
            "configurable": {"thread_id": f"react_agent_{session_id}"},
            "recursion_limit": 100
        }
        
        try:
            start_time = time.time()
            final_state = await self.graph.ainvoke(initial_state, config)
            execution_time = time.time() - start_time
            
            if self.verbose:
                print(f"Debug: final_state type: {type(final_state)}")
                print(f"Debug: final_state content: {final_state}")
            
            # ainvoke should return the state dict; normalize once so a
            # malformed return degrades into an explicit error response
            # instead of repeating isinstance checks on every field.
            is_dict = isinstance(final_state, dict)
            state = final_state if is_dict else {}
            
            response = {
                "input": query,
                "output": state.get("output", "No output generated"),
                "steps": self._format_steps(final_state),
                "success": is_dict and not state.get("has_error", False),
                "error": state.get("error_message") if is_dict else f"Invalid state type: {type(final_state)}",
                "metadata": {
                    **state.get("metadata", {}),
                    "mode": state.get("mode") if is_dict else self.mode,
                    "session_id": state.get("session_id") if is_dict else session_id,
                    "chosen_approach": state.get("chosen_approach"),
                    "current_plan": state.get("current_plan"),
                    "execution_time": execution_time
                }
            }
            
            # Persist successful runs so future sessions can recall them
            if response["success"]:
                episode = Episode(
                    id=session_id,
                    query=query,
                    response=response["output"],
                    reasoning_steps=response["steps"],
                    tools_used=list(set(step.get("action", "") for step in response["steps"] if step.get("action"))),
                    success=True,
                    duration=execution_time,
                    timestamp=time.time(),
                    importance=0.7,  # fixed mid-high importance for successful runs
                    metadata=response["metadata"]
                )
                await self.episodic_memory.store_episode(episode)
            
            return response
            
        except Exception as e:
            return {
                "input": query,
                "output": None,
                "steps": [],
                "success": False,
                "error": f"Agent execution failed: {str(e)}",
                "metadata": {"mode": self.mode, "session_id": session_id}
            }
        finally:
            # Always tear down the session, on success and failure alike
            # (previously duplicated in both the try and except branches).
            await self.context_manager.end_session()
            self.llm_manager.cleanup_session(session_id)
    
    async def _think_node(self, state: AgentState) -> AgentState:
        """Think node - generates thoughts and decides on actions.

        Sends the current reasoning context to the LLM, appends the returned
        thought to ``state["thoughts"]``, increments ``current_step``, records
        the step in the context manager, and - when the reply contains an
        ``Action:`` directive - queues that action for the act node.
        On failure sets ``has_error``/``error_message`` instead of raising.
        """
        if self.verbose:
            print(f"\n🤔 Step {state['current_step'] + 1}: Thinking...")
        
        try:
            # Create thinking prompt
            prompt = await self._create_thinking_prompt(state)
            
            # Get LLM response with safe context manager
            messages = [
                SystemMessage(content=self._get_system_prompt()),
                HumanMessage(content=prompt)
            ]
            
            response = await safe_llm_invoke(self.llm, messages, state.get("session_id"))
            thought_content = response.content
            
            if self.verbose:
                print(f"\n🔍 AI MODEL RESPONSE:")
                print("=" * 80)
                print(thought_content)
                print("=" * 80)
                print(f"💭 Thought: {thought_content}")
            
            # A well-formed ReAct reply contains at most one action; warn when
            # the model emitted several (only the first one is executed).
            action_matches = re.findall(r'Action:\s*(\w+)', thought_content, re.IGNORECASE)
            if len(action_matches) > 1:
                if self.verbose:
                    print(f"⚠️ Warning: LLM generated {len(action_matches)} actions, using only the first one")
            
            # Extract the first action and its input. The input runs until the
            # next ReAct keyword (Action/Observation/Thought/Final Answer) or EOF.
            action_match = re.search(r'Action:\s*(\w+)', thought_content, re.IGNORECASE)
            action_input_match = re.search(r'Action Input:\s*(.+?)(?=\n(?:Action|Observation|Thought|Final Answer)|$)', thought_content, re.IGNORECASE | re.DOTALL)

            # Update state
            state["thoughts"].append(thought_content)
            state["current_step"] += 1
            
            # Store reasoning step in context manager
            from memory.context_manager import ReasoningStep
            reasoning_step = ReasoningStep(
                step_number=state["current_step"],
                thought=thought_content,
                planned_action=action_match.group(1).lower() if action_match else None,
                action_input=action_input_match.group(1).strip() if action_input_match else None,
                confidence=0.7  # Default confidence; not derived from the model output
            )
            await self.context_manager.add_reasoning_step(reasoning_step)
            
            # If action is specified, queue it for the act node
            if action_match:
                action_name = action_match.group(1).lower()
                action_input = action_input_match.group(1).strip() if action_input_match else ""
                
                if self.verbose:
                    print(f"🔍 Parsed Action: {action_name}")
                    print(f"🔍 Parsed Action Input: '{action_input}'")
                
                state["actions"].append({
                    "name": action_name,
                    "input": action_input,
                    "step": state["current_step"]
                })
            
            return state
            
        except Exception as e:
            state["has_error"] = True
            state["error_message"] = f"Thinking failed: {str(e)}"
            return state
    
    async def _act_node(self, state: AgentState) -> AgentState:
        """Act node - executes the most recently queued action using tools.

        Runs the tool, optionally condenses web-search results, then records
        the outcome in ``state["tool_results"]``, the context memory, and the
        tool-context log. On failure sets ``has_error``/``error_message``.
        """
        if not state["actions"]:
            return state
        
        current_action = state["actions"][-1]
        action_name = current_action["name"]
        action_input = current_action["input"]
        
        if self.verbose:
            print(f"🔧 Action: {action_name}")
            print(f"📝 Input: {action_input}")
        
        try:
            # Execute the tool
            if self.verbose:
                print(f"\n🔧 EXECUTING TOOL: {action_name}")
                print(f"📥 Tool Input: {action_input}")
            
            result = await self.tool_manager.execute_tool(action_name, action_input)

            # Web searches return raw pages; trim them down to the relevant parts.
            if action_name == "web_search" and result.data and result.data.get('results'):
                await self._condense_web_results(action_input, result, state)

            if self.verbose:
                print(f"📤 Tool Result: {result.dict()}")

            # Store result
            state["tool_results"].append({
                "tool": action_name,
                "input": action_input,
                "result": result.dict(),
                "step": state["current_step"]
            })

            # Store important results in context memory for session persistence
            await self._store_result_in_context(action_name, action_input, result, state)
            
            # Store tool context in memory system
            from memory.context_manager import ToolContext
            tool_context = ToolContext(
                tool_name=action_name,
                input_data=action_input,
                output_data=result.data if result.success else None,
                success=result.success,
                error_message=result.error if not result.success else None,
                execution_time=0.0,  # Could be measured if needed
                metadata=result.metadata if hasattr(result, 'metadata') else {}
            )
            await self.context_manager.add_tool_context(tool_context)
            
            return state
            
        except Exception as e:
            state["has_error"] = True
            state["error_message"] = f"Action execution failed: {str(e)}"
            return state

    async def _condense_web_results(self, action_input: str, result: Any, state: AgentState) -> None:
        """Filter web-search hits by embedding similarity and condense their bodies.

        Mutates ``result.data['results']`` in place: hits scoring below the
        relevance cutoff are dropped, and the remaining bodies are summarized
        via ``extract_keyinfo``.
        """
        search_results = result.data['results']
        query_vector = self.vector_memory.embedder.embed([str(action_input)])
        body_texts = [item.get('body', 'None') for item in search_results]
        search_vectors = self.vector_memory.embedder.embed(body_texts)
        # NOTE(review): assumes embed() returns vectors such that
        # query @ vectors yields per-result similarity scores - confirm the
        # embedder's contract (orientation/normalization) before changing.
        similarity = (query_vector @ search_vectors)[0]
        order = np.argsort(-similarity)
        keep = np.where(similarity[order] >= 0.2)[0]  # relevance cutoff
        filter_results = [search_results[idx] for idx in order[keep]]

        if self.verbose:
            print("filter_results -->", filter_results)

        # Bug fix: extract_keyinfo is a coroutine, so it must be awaited.
        # The previous ThreadPoolExecutor version only created un-awaited
        # coroutine objects (and raised ValueError when filter_results was
        # empty, because max_workers=0 is invalid). asyncio.gather runs the
        # summaries concurrently and lets us skip failed ones.
        key_datas = []
        if filter_results:
            max_words = 1000
            outcomes = await asyncio.gather(
                *(self.extract_keyinfo(action_input, item, state.get('session_id'), max_words)
                  for item in filter_results),
                return_exceptions=True,
            )
            key_datas = [r for r in outcomes
                         if r is not None and not isinstance(r, Exception)]

        if self.verbose:
            print("key_datas -->", key_datas)
        result.data['results'] = key_datas

    async def extract_keyinfo(self, query,  data: dict, session_id: str, max_words: int = 1000) -> dict:
        """Condense one web-search hit down to query-relevant text via the LLM.

        Asks the model (prompt is in Chinese, the agent's working language) to
        excerpt the parts of ``data['body']`` relevant to ``query``, summarizing
        when the relevant text exceeds ``max_words``. Overwrites ``data['body']``
        with the condensed text in place and returns the same dict.

        Args:
            query: The search query driving relevance (unannotated in the
                original signature; presumably a str - confirm with callers).
            data: Search-result dict; must contain a 'body' key.
            session_id: Session identifier forwarded to safe_llm_invoke.
            max_words: Soft length cap communicated to the model in the prompt.

        Returns:
            The ``data`` dict, mutated with the condensed body.
        """
        extract_prompt = f"""你是一名信息抽取专家，需要根据当前查询, 从原文内容中节选出与之相关的文本，并要求节选的字数控制在{max_words}以内。
        如果相关文本的总字数超过{max_words}，则进行适当摘要总结，并尽量保留关键信息和细节。

        ## 当前查询
        {query}
        
        ## 原文内容
        {data['body']}
        
        ## 输出要求
        请直接输出节选出的文本，如有多个片段，以回车符\n分段。    
        """

        messages = [
            HumanMessage(content=extract_prompt)
        ]
        response = await safe_llm_invoke(self.llm, messages, session_id)
        data['body'] = response.content
        return data

    async def _observe_node(self, state: AgentState) -> AgentState:
        """Observe node - processes tool results and creates observations."""
        if not state["tool_results"]:
            return state
        
        current_result = state["tool_results"][-1]
        tool_result = current_result["result"]
        
        # Create observation based on tool result
        if tool_result["success"]:
            observation = f"Tool '{current_result['tool']}' executed successfully. Result: {json.dumps(tool_result['data'], indent=2)}"
        else:
            observation = f"Tool '{current_result['tool']}' failed. Error: {tool_result['error']}"
        
        state["observations"].append(observation)
        
        if self.verbose:
            print(f"👁️ Observation: {observation}")
        
        return state
    
    async def _finish_node(self, state: AgentState) -> AgentState:
        """Finish node - produces the final answer for the user.

        Short-circuits when the think phase already extracted a final answer;
        otherwise asks the LLM for one and strips the 'Final Answer:' prefix.
        """
        if self.verbose:
            print(f"\n✅ Finishing...")
        
        try:
            # Nothing to do if the thinking phase already produced the answer.
            if state.get("output") and state.get("is_complete"):
                if self.verbose:
                    print(f"🎯 Final Answer: {state['output']}")
                return state
            
            messages = [
                SystemMessage(content=self._get_system_prompt()),
                HumanMessage(content=self._create_final_answer_prompt(state)),
            ]
            
            reply = await safe_llm_invoke(self.llm, messages, state.get("session_id"))
            answer = reply.content
            
            # Strip the ReAct 'Final Answer:' prefix when the model used it.
            tail = re.search(r'Final Answer:\s*(.+)', answer, re.IGNORECASE | re.DOTALL)
            if tail:
                answer = tail.group(1).strip()
            
            state["output"] = answer
            state["is_complete"] = True
            
            if self.verbose:
                print(f"🎯 Final Answer: {answer}")
            
            return state
            
        except Exception as e:
            state["has_error"] = True
            state["error_message"] = f"Final answer generation failed: {str(e)}"
            return state
    
    async def _reflect_node(self, state: AgentState) -> AgentState:
        """Reflection node - performs self-critique and response refinement."""
        if not self.reflection_module:
            if self.verbose:
                print("🔍 Reflection disabled, skipping...")
            return state
        
        if self.verbose:
            print(f"\n🔍 Starting reflection process...")
        
        try:
            # Set reflection enabled flag
            state["reflection_enabled"] = True
            
            # Store original response for comparison
            original_response = state.get("output", "")
            state["original_response"] = original_response
            
            if not original_response:
                if self.verbose:
                    print("⚠️ No output to reflect on, skipping reflection")
                return state
            
            # Prepare reasoning steps for reflection
            reasoning_steps = []
            
            # Add thoughts as reasoning steps
            for i, thought in enumerate(state.get("thoughts", [])):
                reasoning_steps.append({
                    "step": i + 1,
                    "thought": thought,
                    "action": None,
                    "observation": None
                })
            
            # Add actions and observations
            actions = state.get("actions", [])
            observations = state.get("observations", [])
            tool_results = state.get("tool_results", [])
            
            for i, action in enumerate(actions):
                step_data = {
                    "step": action.get("step", i + 1),
                    "thought": None,
                    "action": action.get("name"),
                    "action_input": action.get("input"),
                    "observation": observations[i] if i < len(observations) else None
                }
                
                # Find corresponding reasoning step or create new one
                step_found = False
                for rs in reasoning_steps:
                    if rs["step"] == step_data["step"]:
                        rs.update({k: v for k, v in step_data.items() if v is not None})
                        step_found = True
                        break
                
                if not step_found:
                    reasoning_steps.append(step_data)
            
            # Sort reasoning steps by step number
            reasoning_steps.sort(key=lambda x: x["step"])
            
            # Perform reflection and refinement
            refined_response, reflection_metadata = await self.reflection_module.reflect_and_refine(
                state, original_response, reasoning_steps
            )
            
            # Update state with reflection results
            state["output"] = refined_response
            state["reflection_iterations"] = reflection_metadata["reflection_iterations"]
            state["reflection_history"] = reflection_metadata["reflection_history"]
            state["final_quality_score"] = reflection_metadata["final_quality_score"]
            state["reflection_improvements"] = reflection_metadata["total_improvements"]
            
            # Update metadata
            if "metadata" not in state:
                state["metadata"] = {}
            state["metadata"]["reflection"] = reflection_metadata
            
            if self.verbose:
                print(f"🎉 Reflection completed!")
                print(f"📊 Quality Score: {reflection_metadata['final_quality_score']:.2f}")
                print(f"🔧 Improvements: {len(reflection_metadata['total_improvements'])}")
                if reflection_metadata['total_improvements']:
                    for improvement in reflection_metadata['total_improvements']:
                        print(f"  • {improvement}")
            
            return state
            
        except Exception as e:
            if self.verbose:
                print(f"❌ Reflection failed: {str(e)}")
            
            # Add error to metadata but don't fail the entire process
            if "metadata" not in state:
                state["metadata"] = {}
            state["metadata"]["reflection_error"] = str(e)
            
            return state
    
    def _should_continue_after_think(self, state: AgentState) -> str:
        """Decide whether to continue with action or finish."""
        # Check if we've reached max steps
        if state["current_step"] >= state["max_steps"]:
            return "finish"
        
        # Check if there's an error
        if state["has_error"]:
            return "finish"
        
        # Check if the last thought indicates we should finish
        if state["thoughts"]:
            last_thought = state["thoughts"][-1]
            # Check for Final Answer pattern
            if re.search(r'Final Answer:', last_thought, re.IGNORECASE):
                # Extract the final answer and set it as output
                final_answer_match = re.search(r'Final Answer:\s*(.+)', last_thought, re.IGNORECASE | re.DOTALL)
                if final_answer_match:
                    state["output"] = final_answer_match.group(1).strip()
                    state["is_complete"] = True
                return "finish"
            
            # Check for other completion indicators
            last_thought_lower = last_thought.lower()
            if "i now know" in last_thought_lower and "final answer" in last_thought_lower:
                return "finish"
        
        # Check if there's an action to execute
        if state["actions"] and len(state["actions"]) > len(state["observations"]):
            return "act"
        
        # Default to finish if no clear action
        return "finish"
    
    def _get_system_prompt(self) -> str:
        """Get the system prompt for the React Agent.

        The prompt embeds the tool descriptions, tool names, and the current
        system time (so the model can reason about date/time questions). It is
        written in Chinese - the agent's working language - and spells out the
        ReAct contract (one Thought / Action / Final Answer per reply) that the
        regex parsing in _think_node depends on.
        """
        tools_description = self.tool_manager.format_tools_for_prompt()

        # NOTE: the returned text is a runtime string; its 'Action:' /
        # 'Action Input:' keywords are matched verbatim by _think_node, so
        # they must stay in English even though the rest is Chinese.
        return f"""你是一个乐于助人的人工智能助手，它使用ReAct（推理与行动）框架来解决问题。

你可以使用以下工具：
{tools_description}

记忆功能：
- 你可以访问当前会话的上下文，包括之前的计算和工具结果
- 你可以访问情景记忆中类似的过往交互
- 当用户提到“我刚刚计算的数字”或类似表述时，请先检查你的记忆上下文
- 相关时，你的记忆上下文将在提示中提供

重要提示：严格遵循ReAct模式。在每个回复中，仅提供以下内容之一：

1. 思考（关于下一步该做什么的推理）
2. 行动及行动输入（如果你需要使用工具）
3. 最终答案（当你准备好得出结论时）

使用以下确切格式：

思考：[你关于下一步该做什么的推理]

或者

Action: [工具名称]
Action Input: [工具的完整输入]

或者

最终答案：[你给用户的最终回复]

可用工具：{', '.join(self.tool_manager.get_tool_names())}

关键规则：
1. 每次回复仅提供一个思考、行动或最终答案
2. 不要在你的回复中包含观察结果——它们将自动提供
3. 不要模拟整个对话——只提供下一步
4. 对于数据库操作，始终包含完整命令（例如，“set key value”，而不仅仅是“set”）
5. 逐步思考，并在需要外部信息时使用工具
6. 当你有足够信息时，提供最终答案
7. 当用户提及之前的结果或计算时，务必先检查你的记忆上下文
8. 利用你的记忆避免重复已经做过的计算或搜索

如果用户提问涉及日期/时间，请结合当前系统时间（{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())})进行分析。

"""

    async def _store_result_in_context(self, action_name: str, action_input: str, result: Any, state: AgentState):
        """Store important tool results in context memory for session persistence.

        Depending on which tool ran, selected pieces of ``result.data`` are
        written into the shared-variable store so later turns can resolve
        references like "the number I just calculated".  Failures here are
        deliberately non-fatal: they are printed in verbose mode and swallowed.

        Args:
            action_name: Name of the tool that produced ``result``
                ("calculator", "database", "web_search", "wikipedia", ...).
            action_input: Raw input string that was passed to the tool.
            result: Tool result object; assumed to expose ``success`` and
                ``data`` attributes — TODO confirm against the tool manager.
            state: Current agent state (not read here; kept for signature
                symmetry with the other node helpers).
        """
        try:
            if result.success:
                # Store calculator results
                if action_name == "calculator" and result.data:
                    calculation_result = result.data.get("result")
                    expression = result.data.get("expression")
                    if calculation_result is not None:
                        self.context_manager.set_shared_variable(
                            "last_calculation_result", 
                            calculation_result, 
                            source_tool="calculator"
                        )
                        self.context_manager.set_shared_variable(
                            "last_calculation_expression", 
                            expression, 
                            source_tool="calculator"
                        )
                        # Also store with a timestamped key for history
                        timestamp_key = f"calculation_{int(time.time())}"
                        self.context_manager.set_shared_variable(
                            timestamp_key, 
                            {"expression": expression, "result": calculation_result}, 
                            source_tool="calculator"
                        )
                
                # Store database results that might be important
                elif action_name == "database" and result.data:
                    # "get" is checked before "set": an input containing both
                    # is treated as a retrieval.
                    if "get" in action_input.lower():
                        # Store retrieved data
                        key_match = re.search(r'get\s+(\w+)', action_input.lower())
                        if key_match:
                            key = key_match.group(1)
                            self.context_manager.set_shared_variable(
                                f"db_retrieved_{key}", 
                                result.data, 
                                source_tool="database"
                            )
                    elif "set" in action_input.lower():
                        # Store confirmation of data storage
                        self.context_manager.set_shared_variable(
                            "last_db_operation", 
                            {"operation": "set", "input": action_input, "result": result.data}, 
                            source_tool="database"
                        )
                
                # Store web search results summary
                elif action_name in ["web_search", "wikipedia"] and result.data:
                    # Truncate to 200 chars so the shared store stays compact.
                    search_summary = str(result.data)[:200] + "..." if len(str(result.data)) > 200 else str(result.data)
                    self.context_manager.set_shared_variable(
                        f"last_{action_name}_result", 
                        search_summary, 
                        source_tool=action_name
                    )
        except Exception as e:
            # Best-effort persistence: never let memory bookkeeping break a turn.
            if self.verbose:
                print(f"⚠️ Warning: Failed to store result in context: {str(e)}")

    async def _create_thinking_prompt(self, state: "AgentState") -> str:
        """Create prompt for the thinking phase.

        Leads with the user question, optionally injects relevant memory
        context, replays every *completed* Thought/Action/Observation cycle,
        and ends with a dangling "Thought:" cue for the model.
        """
        lines = [f"Question: {state['input']}"]

        # Prepend any relevant memory context before the reasoning trace.
        memory_context = await self._get_relevant_memory_context(state)
        if memory_context:
            lines.append(f"\nRelevant Context from Memory:\n{memory_context}")

        actions = state["actions"]
        observations = state["observations"]

        # Replay only cycles that have both an action and an observation.
        for idx, thought in enumerate(state["thoughts"]):
            action = actions[idx] if idx < len(actions) else None
            observation = observations[idx] if idx < len(observations) else None
            if action and observation:
                lines.extend([
                    f"Thought: {thought}",
                    f"Action: {action['name']}",
                    f"Action Input: {action['input']}",
                    f"Observation: {observation}",
                ])

        lines.append("Thought:")
        return "\n".join(lines)
    
    async def _get_relevant_memory_context(self, state: AgentState) -> str:
        """Get relevant memory context for the current query.

        Assembles a text block from two sources:

        1. Shared session variables (calculation, database, and search
           results), filtered by keyword groups matched against the query.
        2. Similar past episodes from episodic memory (when the agent has
           one), with extra calculation detail for calculation-style queries.

        Returns an empty string when nothing relevant is found.  All lookup
        failures are non-fatal: they are printed in verbose mode and the
        method degrades to returning whatever was gathered so far.
        """
        context_parts = []
        
        try:
            # Get shared variables from current session
            shared_vars = self.context_manager.get_all_shared_variables()
            if self.verbose:
                print(f"🔍 Debug: Shared variables: {shared_vars}")
            
            if shared_vars:
                relevant_vars = {}
                query_lower = state['input'].lower()
                
                # Include calculation results if query mentions calculations, numbers, or "just calculated"
                if any(keyword in query_lower for keyword in ['calculate', 'number', 'result', 'just calculated', 'computed', 'math']):
                    for key, value in shared_vars.items():
                        if isinstance(key, str) and ('calculation' in key or key in ['last_calculation_result', 'last_calculation_expression']):
                            relevant_vars[key] = value
                
                # Include database results if query mentions data or specific keys
                if any(keyword in query_lower for keyword in ['data', 'database', 'stored', 'saved', 'retrieved']):
                    for key, value in shared_vars.items():
                        if isinstance(key, str) and ('db_' in key or 'database' in key):
                            relevant_vars[key] = value
                
                # Include search results if query mentions search or information
                if any(keyword in query_lower for keyword in ['search', 'information', 'about', 'find']):
                    for key, value in shared_vars.items():
                        if isinstance(key, str) and ('search' in key or 'wikipedia' in key):
                            relevant_vars[key] = value
                
                if relevant_vars:
                    context_parts.append("Current Session Context:")
                    for key, value in relevant_vars.items():
                        context_parts.append(f"  {key}: {value}")
            
            # Search episodic memory for similar past interactions
            if hasattr(self, 'episodic_memory'):
                try:
                    similar_episodes = await self.episodic_memory.find_similar_episodes(state['input'], top_k=5)
                    if similar_episodes:
                        context_parts.append("\nSimilar Past Interactions (use these to help answer the current query):")
                        for episode, similarity in similar_episodes:
                            if similarity > 0.2:  # Lower threshold to include more episodes
                                # For calculation queries, include specific calculation results
                                if any(keyword in state['input'].lower() for keyword in ['calculate', 'calculation', 'previous', 'result', 'math']):
                                    context_parts.append(f"  Previous Query: {episode.query}")
                                    context_parts.append(f"  Previous Result: {episode.response}")
                                    context_parts.append(f"  Tools used: {', '.join(episode.tools_used)}")
                                    # Extract calculation details if available
                                    if 'calculator' in episode.tools_used:
                                        # Scan the episode's reasoning trace for the first
                                        # calculator step and surface its input expression.
                                        for step in episode.reasoning_steps:
                                            if isinstance(step, dict) and 'action' in step and step['action'] == 'calculator':
                                                context_parts.append(f"  Calculation: {step.get('input', 'N/A')} = {episode.response}")
                                                break
                                    context_parts.append("")
                                else:
                                    context_parts.append(f"  Query: {episode.query}")
                                    context_parts.append(f"  Response: {episode.response}")
                                    context_parts.append(f"  Tools used: {', '.join(episode.tools_used)}")
                                    context_parts.append("")
                except Exception as ep_error:
                    # Episodic memory is optional context; never fail the turn over it.
                    if self.verbose:
                        print(f"⚠️ Warning: Failed to get episodic memory: {str(ep_error)}")
            
        except Exception as e:
            if self.verbose:
                print(f"⚠️ Warning: Failed to get memory context: {str(e)}")
                import traceback
                traceback.print_exc()
        
        return "\n".join(context_parts) if context_parts else ""
    
    def _create_final_answer_prompt(self, state: AgentState) -> str:
        """Create prompt for generating the final answer."""
        prompt_parts = [f"Question: {state['input']}"]
        
        # Add full conversation history
        for i, (thought, action, observation) in enumerate(zip(
            state["thoughts"], 
            state["actions"] + [None] * len(state["thoughts"]), 
            state["observations"] + [None] * len(state["thoughts"])
        )):
            prompt_parts.append(f"Thought: {thought}")
            if action:
                prompt_parts.append(f"Action: {action['name']}")
                prompt_parts.append(f"Action Input: {action['input']}")
            if observation:
                prompt_parts.append(f"Observation: {observation}")
        
        prompt_parts.append("Based on the above reasoning, provide your final answer:")
        prompt_parts.append("Final Answer:")
        
        return "\n".join(prompt_parts)
    
    def _format_steps(self, state: AgentState) -> List[Dict[str, Any]]:
        """Format the reasoning steps for output."""
        steps = []
        
        # Handle case where state might not be a proper dictionary
        if not isinstance(state, dict):
            print(f"Warning: Expected dict for state, got {type(state)}: {state}")
            return []
        
        # Safely get the required fields with defaults
        thoughts = state.get("thoughts", [])
        actions = state.get("actions", [])
        observations = state.get("observations", [])
        
        for i, thought in enumerate(thoughts):
            step = {
                "step": i + 1,
                "thought": thought,
                "action": None,
                "action_input": None,
                "observation": None
            }
            
            if i < len(actions):
                action = actions[i]
                if isinstance(action, dict):
                    step["action"] = action.get("name")
                    step["action_input"] = action.get("input")
                else:
                    step["action"] = str(action)
            
            if i < len(observations):
                step["observation"] = observations[i]
            
            steps.append(step)
        
        return steps
    
    def _create_tao_steps(self, state: "AgentState") -> "List[ThoughtActionObservation]":
        """Create ThoughtActionObservation objects from state.

        One object per thought (``step`` is 1-based); action name/input and
        observation are ``None`` where the parallel lists run short.
        """
        actions = state["actions"]
        observations = state["observations"]

        tao_steps = []
        for index, thought in enumerate(state["thoughts"]):
            has_action = index < len(actions)
            tao_steps.append(ThoughtActionObservation(
                thought=thought,
                action=actions[index]["name"] if has_action else None,
                action_input=actions[index]["input"] if has_action else None,
                observation=observations[index] if index < len(observations) else None,
                step=index + 1,
            ))
        return tao_steps
    
    async def get_memory_stats(self) -> Dict[str, Any]:
        """Get memory and usage statistics.

        Always reports conversation count, tool usage and tool names;
        episodic-memory and executor stats are included only when those
        subsystems exist on this agent (empty dicts otherwise).
        """
        stats: Dict[str, Any] = {
            "conversation_count": len(self.memory.conversation_history),
            "tool_usage": self.memory.get_tool_stats(),
            "available_tools": self.tool_manager.get_tool_names(),
        }
        # Optional subsystems: only present when the agent was built with them.
        stats["episodic_memory_stats"] = (
            await self.episodic_memory.get_episode_stats()
            if hasattr(self, 'episodic_memory') else {}
        )
        stats["execution_stats"] = (
            self.executor.get_execution_stats() if hasattr(self, 'executor') else {}
        )
        return stats
    
    # New nodes for hybrid approach
    
    async def _decide_approach_node(self, state: "AgentState") -> "AgentState":
        """Decide whether to use Simple, ReAct or Plan-Execute approach.

        Asks the LLM (primed with similar past episodes) to pick an
        approach, then keyword-matches the reply.  Any failure defaults
        safely to ReAct.
        """
        if self.verbose:
            print(f"\n🤔 Deciding approach for: {state['input']}")

        try:
            # Past episodes similar to the query inform the decision prompt.
            similar_episodes = await self.episodic_memory.find_similar_episodes(state['input'], top_k=3)
            messages = [
                SystemMessage(content=self._get_decision_system_prompt()),
                HumanMessage(content=self._create_decision_prompt(state, similar_episodes)),
            ]
            response = await safe_llm_invoke(self.llm, messages, state.get("session_id"))
            decision_text = response.content.lower()

            # "simple" wins outright; "plan" + "execute" selects Plan-Execute;
            # anything else falls back to ReAct.
            if "simple" in decision_text:
                chosen, banner = "simple", "📋 Chosen approach: simple"
            elif "plan" in decision_text and "execute" in decision_text:
                chosen, banner = "plan_execute", "📋 Chosen approach: Plan-Execute"
            else:
                chosen, banner = "react", "🔄 Chosen approach: ReAct"

            state["chosen_approach"] = chosen
            if self.verbose:
                print(banner)
            return state

        except Exception as e:
            # Default to ReAct on error
            state["chosen_approach"] = "react"
            if self.verbose:
                print(f"⚠️ Decision failed, defaulting to ReAct: {str(e)}")
            return state

    async def _simple_node(self, state: "AgentState") -> "AgentState":
        """Simple node - answers the query directly, without tools.

        Summarizes up to three similar past episodes for context, asks the
        LLM for a direct reply, strips an optional "Final Answer:" prefix,
        and marks the state complete.  Errors are recorded on the state
        rather than raised.
        """
        if self.verbose:
            print(f"\n📋 Direct reply for: {state['input']}")

        try:
            # Ground the reply in similar past interactions, when available.
            similar_episodes = await self.episodic_memory.find_similar_episodes(state['input'], top_k=3)
            similar_episodes_text = ""
            if similar_episodes:
                similar_episodes_text = "\n类似的过往事件:\n"
                for episode, similarity in similar_episodes[:3]:
                    approach = "Plan-Execute" if len(episode.tools_used) > 2 else "ReAct"
                    similar_episodes_text += f"- Query: '{episode.query}' | Approach: {approach} | Success: {episode.success} | Tools: {len(episode.tools_used)}\n"

            input_prompt = (
                f"结合历史记录，对用户当前问题进行回答。\n" +
                f"## 历史记录：\n{similar_episodes_text}\n" +
                f"## 当前问题：\n{state['input']}"
            )

            messages = [
                SystemMessage(content=f'你是一个乐于助人的人工智能助手，你可以直接对当前的用户提问进行回答，' +
                                      f'如果用户提问涉及日期/时间，请结合当前系统时间（{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())})进行分析。'
                ),
                HumanMessage(content=input_prompt)
            ]

            response = await safe_llm_invoke(self.llm, messages, state.get("session_id"))
            answer = response.content

            # Some models still answer in ReAct format; keep only the answer body.
            answer_match = re.search(r'Final Answer:\s*(.+)', answer, re.IGNORECASE | re.DOTALL)
            if answer_match:
                answer = answer_match.group(1).strip()

            state["output"] = answer
            state["is_complete"] = True

            if self.verbose:
                print(f"🎯 Simple Answer: {answer}")

            return state

        except Exception as e:
            state["has_error"] = True
            # Bug fix: previously reported "Planning failed", misattributing
            # simple-reply errors to the planner node.
            state["error_message"] = f"Simple reply failed: {str(e)}"
            return state


    async def _plan_node(self, state: "AgentState") -> "AgentState":
        """Plan node - creates execution plan.

        Fetches planner-relevant context, asks the planner for a plan over
        the available tools, and records the plan plus its id/confidence in
        the state metadata.  Failures set ``has_error``/``error_message``.
        """
        if self.verbose:
            print(f"\n📋 Planning for: {state['input']}")

        try:
            # The planner receives whatever context is relevant to the query.
            planning_context = await self.context_manager.get_relevant_context("planner", state['input'])

            plan = await self.planner.create_plan(
                query=state['input'],
                available_tools=self.tool_manager.get_tool_names(),
                context=planning_context,
            )

            # Record the plan and its metadata for downstream nodes.
            state["current_plan"] = plan
            state["metadata"]["plan_id"] = plan.id
            state["metadata"]["plan_confidence"] = plan.confidence

            if self.verbose:
                print(f"📝 Created plan with {len(plan.steps)} steps (confidence: {plan.confidence:.2f})")

            return state

        except Exception as e:
            state["has_error"] = True
            state["error_message"] = f"Planning failed: {str(e)}"
            return state
    
    async def _execute_node(self, state: "AgentState") -> "AgentState":
        """Execute node - executes the plan.

        Runs the current plan through the executor, records success-rate and
        timing metadata, and either completes the state (on COMPLETED) or
        flags ``plan_failed`` so routing can fall back to ReAct.
        """
        if self.verbose:
            print(f"\n⚡ Executing plan...")

        try:
            plan = state.get("current_plan")
            if not plan:
                # Nothing to run: surface this as an error rather than crash.
                state["has_error"] = True
                state["error_message"] = "No plan available for execution"
                return state

            outcome = await self.executor.execute_plan(plan)

            # Record execution metrics for later evaluation / replanning.
            state["execution_result"] = outcome
            state["metadata"]["execution_success_rate"] = outcome.success_rate
            state["metadata"]["execution_time"] = outcome.total_time

            if outcome.status == ExecutionStatus.COMPLETED:
                state["output"] = outcome.final_output
                state["is_complete"] = True
                if self.verbose:
                    print(f"✅ Plan executed successfully (success rate: {outcome.success_rate:.2f})")
            else:
                # A failed plan is flagged so the graph can fall back to ReAct.
                state["plan_failed"] = True
                if self.verbose:
                    print(f"⚠️ Plan execution failed (success rate: {outcome.success_rate:.2f})")

            return state

        except Exception as e:
            state["has_error"] = True
            state["error_message"] = f"Execution failed: {str(e)}"
            return state

    def _route_after_decision(self, state: AgentState) -> str:
        """Route after approach decision."""
        chosen_approach = state.get("chosen_approach", "simple")
        if chosen_approach == 'simple':
            return "simple"
        elif chosen_approach == "plan_execute":
            return "plan"
        else:
            return "think"

    def _should_continue_after_execute(self, state: AgentState) -> str:
        """Decide what to do after plan execution."""
        # If execution was successful, finish
        if state.get("is_complete", False):
            return "finish"
        
        # If plan failed and we haven't exceeded max steps, fall back to ReAct
        if state.get("plan_failed", False) and state["current_step"] < state["max_steps"]:
            return "think"
        
        # Otherwise finish
        return "finish"
    
    # Helper methods for new functionality
    
    def _create_decision_prompt(self, state: AgentState, similar_episodes: List[Tuple]) -> str:
        """Create prompt for approach decision."""
        query = state['input']
        
        # Analyze query complexity
        complexity_indicators = [
            "multiple steps", "first", "then", "after that", "calculate and",
            "search and", "find and", "compare", "analyze", "complex",
            "多个步骤", "首先", "然后", "之后", "计算并",
            "搜索并", "找到并", "比较", "分析", "复杂",
        ]
        
        has_complexity = any(indicator in query.lower() for indicator in complexity_indicators)
        
        similar_episodes_text = ""
        if similar_episodes:
            # similar_episodes_text = "\nSimilar past episodes:\n"
            similar_episodes_text = "\n类似的过往事件:\n"
            for episode, similarity in similar_episodes[:3]:
                approach = "Plan-Execute" if len(episode.tools_used) > 2 else "ReAct"
                similar_episodes_text += f"- Query: '{episode.query}' | Approach: {approach} | Success: {episode.success} | Tools: {len(episode.tools_used)}\n"
        
#         return f"""Analyze this query and decide the best approach:
#
# Query: "{query}"
#
# Query Analysis:
# - Appears complex (multiple steps): {has_complexity}
# - Word count: {len(query.split())}
# {similar_episodes_text}
#
# Available approaches:
# 1. **ReAct**: Good for simple queries, exploratory tasks, when you need to adapt based on intermediate results
# 2. **Plan-Execute**: Good for complex multi-step tasks, when you can plan ahead, structured workflows
#
# Choose the best approach and explain why. Respond with either "ReAct" or "Plan-Execute" followed by your reasoning."""

        return f"""分析此查询并确定最佳方法：

查询：“{query}”

查询分析：
- 看起来较为复杂（多步骤）：{has_complexity}
- 词数：{len(query.split())}

历史记录：
{similar_episodes_text}

可用方法：
1. **Simple**：适用于可以直接给出明确回答的简单对话，以及在用户未明确要求进行仔细思考或专业回答的常识性问题
2. **ReAct**：适用于查询、探索性任务，以及需要根据中间结果进行调整的情况
3. **Plan-Execute**：适用于复杂的多步骤任务、可能需要结合上下文才能准确回答的问题、以及可以提前规划的情况以及结构化工作流程

选择最佳方法并解释原因。请回复“Simple”或“ReAct”或“Plan-Execute”，并附上理由。"""

    def _get_decision_system_prompt(self) -> str:
        """Get system prompt for approach decision."""
#         return """You are an expert at choosing the best problem-solving approach for AI agents.
#
# Guidelines for choosing approaches:
#
# **Choose ReAct when:**
# - Query is simple or exploratory
# - You need to adapt based on intermediate results
# - The path forward is unclear
# - Query involves discovery or research
#
# **Choose Plan-Execute when:**
# - Query has clear multiple steps
# - You can plan the entire workflow upfront
# - Query involves structured data processing
# - Efficiency is important (parallel execution possible)
#
# Always explain your reasoning briefly."""
        return """你是为人工智能代理选择最佳问题解决方法的专家。

选择方法的指导原则：

**在以下情况选择Simple：**
- 可以直接给出明确回答的简单对话
- 在用户未明确要求进行仔细思考或专业回答的常识性问题

**在以下情况选择ReAct：**
- 查询简单或具有探索性
- 需要根据中间结果进行调整
- 前进的路径不明确
- 查询涉及发现或研究

**在以下情况选择Plan-Execute：**
- 可能需要结合上下文才能准确回答的问题
- 查询有明确的多个步骤
- 可以预先规划整个工作流程
- 查询涉及结构化数据处理
- 效率很重要（可进行并行执行）

务必简要解释你的推理过程。"""

    async def _evaluate_execution_node(self, state: AgentState) -> AgentState:
        """Evaluate execution results and decide if replanning is needed.

        Packages the current plan and its execution results into an
        ``AdaptationContext`` and asks the adaptive replanner whether to
        replan.  Writes ``evaluation_result`` back onto the state — one of
        "replan_needed", "continue", "no_plan_or_result" or
        "evaluation_failed" — along with the decision and context objects
        used by the downstream routing/replanning nodes.
        """
        if self.verbose:
            print(f"\n🔍 Evaluating execution results...")
        
        try:
            # Get current plan and execution results
            current_plan = state.get("current_plan")
            execution_result = state.get("execution_result")
            
            if not current_plan or not execution_result:
                # No plan or execution result, proceed to finish
                state["evaluation_result"] = "no_plan_or_result"
                return state
            
            # Create adaptation context.  hasattr guards allow execution_result
            # objects that lack step_results/success_rate to degrade gracefully.
            adaptation_context = AdaptationContext(
                original_query=state["input"],
                current_plan=current_plan,
                execution_results=execution_result.step_results if hasattr(execution_result, 'step_results') else [],
                partial_outputs=state.get("partial_outputs", {}),
                failed_attempts=state.get("failed_attempts", []),
                available_tools=self.tool_manager.get_tool_names(),
                time_budget_remaining=max(0, 300 - (state.get("current_step", 0) * 10)),  # Estimate remaining time
                success_probability=execution_result.success_rate if hasattr(execution_result, 'success_rate') else 0.5,
                context_variables=state.get("context_variables", {})
            )
            
            # Check if we should replan
            replan_decision = await self.adaptive_replanner.should_replan(
                adaptation_context, 
                state.get("session_id")
            )
            
            # Store evaluation results in state
            state["evaluation_result"] = "replan_needed" if replan_decision.should_replan else "continue"
            state["replan_decision"] = replan_decision
            state["adaptation_context"] = adaptation_context
            
            if self.verbose:
                print(f"📊 Evaluation: {state['evaluation_result']}")
                if replan_decision.should_replan:
                    print(f"🔄 Replanning recommended: {replan_decision.reasoning}")
                    print(f"📈 Expected improvement: {replan_decision.estimated_improvement:.2f}")
            
            return state
            
        except Exception as e:
            # Evaluation failures are recorded, not raised; routing falls back.
            if self.verbose:
                print(f"❌ Evaluation failed: {str(e)}")
            state["evaluation_result"] = "evaluation_failed"
            state["error_message"] = f"Evaluation failed: {str(e)}"
            return state
    
    async def _adaptive_replan_node(self, state: AgentState) -> AgentState:
        """Execute adaptive replanning based on evaluation.

        Uses the replan decision and adaptation context produced by
        ``_evaluate_execution_node`` to build a new plan.  Sets
        ``replan_result`` to one of "success", "failed",
        "no_decision_or_context" or "max_attempts_reached" (after three
        attempts) for ``_route_after_replan`` to act on, and clears the
        previous ``execution_result`` so the new plan starts fresh.
        """
        if self.verbose:
            print(f"\n🔄 Executing adaptive replanning...")
        
        try:
            replan_decision = state.get("replan_decision")
            adaptation_context = state.get("adaptation_context")
            
            if not replan_decision or not adaptation_context:
                state["replan_result"] = "no_decision_or_context"
                return state
            
            # Execute the replanning
            new_plan, replan_record = await self.adaptive_replanner.execute_adaptive_replan(
                replan_decision,
                adaptation_context,
                state.get("session_id")
            )
            
            # Update state with new plan
            state["current_plan"] = new_plan
            state["replan_result"] = "success"
            state["replan_record"] = replan_record
            
            # Reset execution state for new plan attempt
            if "execution_result" in state:
                del state["execution_result"]
            
            # Track replanning attempts
            replanning_attempts = state.get("replanning_attempts", 0) + 1
            state["replanning_attempts"] = replanning_attempts
            
            if self.verbose:
                print(f"✅ Replanning successful: {replan_decision.strategy.value}")
                print(f"🎯 New plan: {new_plan.goal}")
                print(f"📝 Steps: {len(new_plan.steps)}")
                print(f"🔢 Replanning attempt: {replanning_attempts}")
            
            # Prevent infinite replanning loops: after 3 attempts the
            # "max_attempts_reached" result routes back to ReAct.
            if replanning_attempts >= 3:
                state["replan_result"] = "max_attempts_reached"
                if self.verbose:
                    print(f"⚠️ Maximum replanning attempts reached, switching to ReAct")
            
            return state
            
        except Exception as e:
            if self.verbose:
                print(f"❌ Replanning failed: {str(e)}")
            state["replan_result"] = "failed"
            state["error_message"] = f"Replanning failed: {str(e)}"
            return state

    def _should_finish_after_evaluation(self, state: AgentState) -> str:
        """Determine next step after execution evaluation."""
        evaluation_result = state.get("evaluation_result", "no_result")
        execution_result = state.get("execution_result")

        # Check if execution was successful enough to finish
        if (evaluation_result == "continue" and execution_result and
                hasattr(execution_result, 'success_rate') and execution_result.success_rate >= 0.7):
            return "finish"

        # Fall back to ReAct if evaluation failed or plan completed but unsatisfactory
        return "think"

    def _should_replan_after_evaluation(self, state: AgentState) -> str:
        """Determine next step after execution evaluation."""
        evaluation_result = state.get("evaluation_result", "no_result")
        execution_result = state.get("execution_result")
        
        # Check if execution was successful enough to finish
        if (evaluation_result == "continue" and execution_result and 
            hasattr(execution_result, 'success_rate') and execution_result.success_rate >= 0.7):
            return "finish"
        
        # Check if we should replan
        if evaluation_result == "replan_needed":
            return "adaptive_replan"
        
        # Fall back to ReAct if evaluation failed or plan completed but unsatisfactory
        return "think"
    
    def _route_after_replan(self, state: AgentState) -> str:
        """Route execution after replanning."""
        replan_result = state.get("replan_result", "no_result")
        replanning_attempts = state.get("replanning_attempts", 0)
        
        # If replanning failed or max attempts reached, fall back to ReAct
        if replan_result in ["failed", "max_attempts_reached", "no_decision_or_context"]:
            return "think"
        
        # If replanning was successful, try executing the new plan
        if replan_result == "success":
            replan_record = state.get("replan_record", {})
            
            # Check if the strategy suggests switching to ReAct approach
            if replan_record.get("strategy") == "switch_approach":
                return "think"
            else:
                return "plan"  # Execute the new plan
        
        # Default fallback
        return "finish"
    
    def switch_database_type(self, use_mysql: bool, mysql_config: Optional[Dict[str, Any]] = None):
        """Switch between MySQL and in-memory database.

        When ``mysql_config`` is omitted, the same local root connection
        used at construction time is assumed.  Note: an explicitly passed
        empty dict is honored as-is, not replaced by the default.
        """
        if mysql_config is None:
            # Default to the local root connection used in __init__.
            mysql_config = {
                "host": "localhost",
                "port": 3306,
                "database": "react_agent_db",
                "user": "root",
                "password": "root",
            }

        self.tool_manager.switch_database_type(use_mysql, mysql_config)

        if self.verbose:
            db_kind = 'MySQL' if use_mysql else 'in-memory'
            print(f"🔄 Switched to {db_kind} database")
            if use_mysql:
                print(f"📊 Database Status: {self.tool_manager.get_database_status()}")
    
    def get_database_status(self) -> Dict[str, Any]:
        """Get current database status.

        Thin wrapper delegating to the tool manager; see
        ``EnhancedToolManager.get_database_status`` for the payload shape.
        """
        return self.tool_manager.get_database_status()
    
    def get_enhanced_tool_info(self) -> Dict[str, Any]:
        """Get comprehensive information about available tools.

        Aggregates the tool manager's metadata (database status, tool
        names, descriptions and schemas) into a single report dict.
        """
        manager = self.tool_manager
        info: Dict[str, Any] = {"tool_manager_type": "EnhancedToolManager"}
        info["database_status"] = self.get_database_status()
        info["available_tools"] = manager.get_tool_names()
        info["tool_descriptions"] = manager.get_tool_descriptions()
        info["tools_schema"] = manager.get_tools_schema()
        return info