import concurrent.futures
import importlib.util # Add this import
import subprocess
import json
import os
import sys
import traceback
import inspect # Import inspect module
import re # Import re module for regex
import xml.etree.ElementTree as ET # Import for XML parsing
from loguru import logger # Import logger
# NOTE(review): loguru's `logger.level("INFO")` only *looks up* (and returns) the
# "INFO" level record — the return value is discarded here, so this line changes
# nothing about which messages are emitted. If the intent was to set a minimum
# log level, that requires `logger.remove()` + `logger.add(sink, level="INFO")`;
# confirm against setup_logging(), which RootAgent.__init__ calls later.
logger.level("INFO")
from datetime import datetime # Import datetime
import uuid # Import uuid
import threading # Import threading for asynchronous memory operations
import queue # Import queue for task management
import time # Import time for retry delays
import shutil # Import shutil

from root_agent.llm import LLMInterface
from root_agent.utils.llm_wrapper import GMemoryLLMWrapper
import config

from root_agent.utils import history_compressor
from root_agent.utils.evolution_manager import EvolutionManager
from root_agent.utils.utils import Utils # Import Utils
from root_agent.utils.task_management import TaskManager # Import TaskManager
from root_agent.utils.save_task_data import save_task_data_func # Import save_task_data_func
from root_agent.utils.memory_management import MemoryManager # Import MemoryManager
from root_agent.utils.task_assessment import TaskAssessment # Import TaskAssessment
from root_agent.utils.task_value_assessor import TaskValueAssessor
from root_agent.utils.task_status_manager import TaskStatusManager # Import TaskStatusManager
from root_agent.utils.tool_utils import ToolUtils # Import ToolUtils
from root_agent.utils.plan_execution import create_plan, execute_plan
from root_agent.utils.task_initialization import initialize_task
from root_agent.utils.prompt_formatter import format_prompt
from root_agent.utils.output_parser import parse_llm_output
from root_agent.utils.helper import (
    setup_logging,
    load_settings,
    get_llm_api_key,
    get_llm_base_url,
    get_model_name,
    get_tool_config,
    get_json_from_str,
    print_and_log,
    load_chat_history,
    save_chat_history,
)


# Custom Exception Classes for more granular error handling
class ToolExecutionError(Exception):
    """Raised when a tool fails during execution; base class for tool errors."""

class InvalidParametersError(ToolExecutionError):
    """Raised when a tool receives parameters it cannot accept."""

class APIError(ToolExecutionError):
    """Raised when a tool fails because of an underlying API problem."""
from tools.tool_manager import ToolManager
from memory_systems.GMemory.mas.memory.mas_memory.GMemory import GMemory
from memory_systems.GMemory.mas.memory.common import MASMessage, StateChain
from memory_systems.GMemory.mas.utils import EmbeddingFunc, get_model_type
from specialized_agents.tool_agent import ToolAgent
from specialized_agents.self_evolution_agent import SelfEvolutionAgent
from memory_systems.vector_db import ZillizVectorDB as VectorDB # Import the new VectorDB # Import SelfEvolutionAgent
from output_generator import OutputGenerator # Import OutputGenerator
from memory_systems.chat_history_manager import ChatHistoryManager # Import ChatHistoryManager
from memory_systems.agent_metadata_db import AgentInfoDB # Import AgentInfoDB
# Sandbox execution is optional; when disabled, expose a stub with the same name
# so call sites fail loudly at call time instead of at import time.
if not config.ENABLE_SANDBOX_EXECUTION:
    logger.warning("Sandbox execution is disabled. 'execute_in_sandbox' will not be available.")

    def execute_in_sandbox(*args, **kwargs):
        raise NotImplementedError("Sandbox execution is disabled in config.py")
else:
    from tools.builtin.execute_in_sandbox import execute_in_sandbox

class RootAgent:
    def __init__(self, base_work_dir=None, userid: str = "test111", openid: str = "zyinfoai"):
        
        self.userid = userid
        self.openid = openid
        #do not change this!
        self.is_root_agent = True
        #for debug
        self.use_gmemory = True
        if not self.use_gmemory:
            logger.info("not use_gmemory~ for debug")
        #for debug
        self.use_vector_memory = True
        if not self.use_vector_memory:
            logger.info("not use_vector_memory~ for debug")

        self.base_work_dir = base_work_dir if base_work_dir else os.getcwd()
        self.llm = LLMInterface()
        self.os_info = sys.platform # Initialize os_info here
        setup_logging(os.path.join(self.base_work_dir, "saia_log.log")) # Setup logging for saia_log.log
        
        logger.info(f"GEMINI_API_KEY in RootAgent: {os.getenv("GEMINI_API_KEY")}")
        self.llm_log_base_dir = os.path.join(self.base_work_dir, "temp", "opt_llm")
        # if os.path.exists(self.llm_log_base_dir):
        #     try:
        #         shutil.rmtree(self.llm_log_base_dir)
        #         logger.info(f"Cleared existing LLM log base directory: {self.llm_log_base_dir}")
        #     except Exception as e:
        #         pass
        os.makedirs(self.llm_log_base_dir, exist_ok=True)
        self.llm.set_log_dir(self.llm_log_base_dir) # Add this line to set the log directory
        self.tool_manager = ToolManager()
        self.tool_agent = ToolAgent()
        self.self_evolution_agent = SelfEvolutionAgent()
        self.semantic_memory = VectorDB() # Instantiate the semantic memory
        self.output_generator = OutputGenerator() # Instantiate OutputGenerator
        self.chat_history_manager = ChatHistoryManager() # Instantiate ChatHistoryManager
        self.agent_metadata_db = AgentInfoDB() # Instantiate AgentInfoDB
        self.read_chat_history = True # New flag to control reading chat history
        self.memory_add_count = 0 # Initialize memory add counter
        self.memory_retrieve_count = 0 # Initialize memory retrieve counter
        self.current_project = None # Initialize current_project
        self.current_task = None # Initialize current_task

        model_type = "deepseek-v3" # Define model_type here
        working_dir = os.path.join(self.base_work_dir, ".db", get_model_type(model_type), "api", "gmemory")
        os.makedirs(working_dir, exist_ok=True)

        self.gmemory = GMemory(
            llm_model=GMemoryLLMWrapper(self.llm),
            embedding_func=EmbeddingFunc(),
            namespace = "gmemory",
            global_config={
                "hop": 1,
                "start_insights_threshold": 5,
                "rounds_per_insights": 5,
                "insights_point_num": 5,
                "working_dir": working_dir
            }
        )
        
        self.task_counter = 0 # For evolution trigger
        self.evolution_trigger_threshold = 5 # Trigger evolution every 5 tasks
        self.successful_tasks = 0
        self.failed_tasks = 0
        self.total_execution_time = 0.0
        self.task_history = [] # Stores (success, execution_time) for recent tasks
        self.llm_usage_metrics = {
            "total_input_tokens": 0,
            "total_output_tokens": 0,
            "total_cost": 0.0,
            "task_llm_metrics": [] # List of dictionaries for each LLM call within a task
        }
        self.task_states = {} # Initialize task_states dictionary
        self.utils = Utils(self) # Instantiate Utils
        self.task_manager_instance = TaskManager(self) # Instantiate TaskManager
        self.memory_manager_instance = MemoryManager(self) # Instantiate MemoryManager
        self.task_assessment_instance = TaskAssessment(self) # Instantiate TaskAssessment
        self.task_value_assessor = TaskValueAssessor(self) # Instantiate TaskValueAssessor
        self.task_status_manager_instance = TaskStatusManager(self) # Instantiate TaskStatusManager
        self.tool_utils_instance = ToolUtils(self) # Instantiate ToolUtils
        self.evolution_manager_instance = EvolutionManager(self) # Instantiate EvolutionManager

    
    def select_agent(self, user_command: str) -> str:
        """
        Selects the most appropriate agent based on the user command and available agent metadata.
        """
        # Check agent selection mode from config
        selection_mode = config.AGENT_SELECTION_MODE.lower()
        
        if selection_mode == "bypass":
            logger.info("Agent selection bypassed. Forcing rootagent.")
            return "rootagent"
        elif selection_mode == "force":
            forced_agent = config.FORCE_AGENT_NAME.lower()
            logger.info(f"Agent selection forced to: {forced_agent}")
            return forced_agent
        
        # If mode is "auto" or unrecognized, proceed with LLM-based selection
        available_agents = self.agent_metadata_db.get_all_agents()
        
        # Filter out disabled agents and rootagent itself
        selectable_agents = {
            name: info for name, info in available_agents.items() 
            if info.get("enabled", True) and name.lower() != "rootagent"
        }

        if not selectable_agents:
            logger.info("No selectable agents found. Defaulting to rootagent.")
            return "rootagent"

        agent_descriptions = []
        for agent_name, agent_info in selectable_agents.items():
            agent_descriptions.append(f"Agent Name: {agent_name}\nDescription: {agent_info.get('description', 'No description provided.')}\n")

        prompt = f"""你是一个高级AI助理，负责根据用户指令选择最合适的Agent来处理任务。
以下是可用的Agent及其描述：

{os.linesep.join(agent_descriptions)}

用户指令：{user_command}

请根据用户指令和Agent的描述，选择一个最适合处理该任务的Agent的名称。
你的回答必须是且仅是所选Agent的名称，不要包含任何其他文字或解释。
如果没有任何Agent适合，请返回 "rootAgent"。
        """
        try:
            response = self.llm.generate_content(prompt, task_type="agent_selection")
            selected_agent_name = response["output_content"].strip().lower()
            
            if selected_agent_name in [name.lower() for name in selectable_agents.keys()]:
                logger.info(f"Selected agent: {selected_agent_name}")
                return selected_agent_name
            else:
                logger.warning(f"LLM selected an invalid agent '{selected_agent_name}'. Defaulting to rootagent.")
                return "rootagent"
        except Exception as e:
            logger.error(f"Error during agent selection: {e}", exc_info=True)
            return "rootagent"

    def get_llm_metrics(self):
        return self.llm_usage_metrics


    def get_memory_status(self):
        gmemory_status = {"enabled": self.use_gmemory, "memory_count": 0}
        if self.use_gmemory:
            try:
                # This is a placeholder. You might need to implement a method in GMemory
                # to get the actual count of memories or other relevant stats.
                # For now, we'll just indicate it's enabled.
                gmemory_status["memory_count"] = len(self.gmemory.retrieve_memory(query_task="")[0]) + len(self.gmemory.retrieve_memory(query_task="")[1])
            except Exception as e:
                logger.warning(f"Could not get GMemory count: {e}")

        semantic_memory_status = {"enabled": self.use_vector_memory, "memory_count": 0}
        if self.use_vector_memory:
            try:
                # This is a placeholder. You might need to implement a method in VectorDB
                # to get the actual count of memories or other relevant stats.
                # For now, we'll just indicate it's enabled.
                # Assuming semantic_memory has a method to get collection stats or count
                # For Zilliz, you might need to query the collection directly.
                # Example: semantic_memory_status["memory_count"] = self.semantic_memory.get_collection_count()
                semantic_memory_status["memory_count"] = self.semantic_memory.count_entities(self.semantic_memory.collection_name)
            except Exception as e:
                logger.warning(f"Could not get Semantic Memory count: {e}")

        return {
            "gmemory": gmemory_status,
            "semantic_memory": semantic_memory_status
        }


    def process_command(self, command: str, current_situation: dict, userid: str, openid: str, max_run_steps: int = None) -> dict:
        print(f"RootAgent: process_command called with command: {command}")
        logger.info(f"Processing command: {command}")
        task_start_time = datetime.now() # Start timing the entire task
        final_assistant_reply = ""
        selected_agent_name = "rootagent" # Default to rootagent
        
        try:
            if (not self.is_root_agent) and config.FORCE_AGENT_NAME != "rootagent": 
                # Step 1: Select the appropriate agent
                selected_agent_name = self.select_agent(command)
                logger.info(f"Agent selected for command '{command}': {selected_agent_name}")
            # Increment run count for the selected agent
            self.agent_metadata_db.increment_run_count(selected_agent_name, success=False) # Assume failure until proven success

            if (not self.is_root_agent) or (selected_agent_name.lower() == "rootagent" or config.FORCE_AGENT_NAME == "rootagent"):
                # Original RootAgent logic
                command, project_name, task_name, error_response = initialize_task(self, command, current_situation)
                if error_response:
                    return error_response

                # Task Value Assessment
                assessment = self.task_value_assessor.assess_task(command)
                if assessment.get("value") == "low" and assessment.get("cost") == "high":
                    warning_message = f"Warning: The requested task has been assessed as low-value but high-cost. Reason: {assessment.get('reasoning')}. Proceeding anyway, but please consider if this is intended."
                    logger.warning(warning_message)

                processing_history = []
                all_plan = None
                current_step_in_plan = 0
                start_time = time.time()
                last_info = self._load_last_info(userid)
                current_max_rounds = max_run_steps if max_run_steps is not None else config.MAX_ROUNDS
                results = [] # Initialize results for rootagent

                current_chat_history = ""
                if self.read_chat_history:
                    current_chat_history = self.chat_history_manager.get_recent_chat_history(userid)
                
                current_chat_history += "user: "+command+"\n"
                
                for round_num in range(current_max_rounds):
                    if time.time() - start_time > config.TIMEOUT_SECONDS:
                        final_assistant_reply = self._handle_timeout(processing_history)
                        break

                    if len(processing_history) > config.MAX_HISTORY_LENGTH:
                        processing_history = history_compressor.summarize(processing_history, self.llm)
                        processing_history.insert(0, "History has been summarized.")

                    current_memory_context = ""
                    if last_info and round_num == 0:
                        last_info_str = json.dumps(last_info, ensure_ascii=False, indent=2)
                        current_memory_context = f"Previous Task Info (for initial reference):```json\n{last_info_str}\n```\n" + current_memory_context
                    else:
                        initial_retrieved_memories = self.memory_manager_instance.retrieve_memory(command)
                        self.memory_retrieve_count += 1 # Increment memory retrieve count
                        if initial_retrieved_memories["semantic_summaries"]:
                            current_memory_context += "Relevant Past Experiences (Semantic Memory):\n"
                            for item in initial_retrieved_memories["semantic_summaries"]:
                                summary_text = item["summary"]
                                metadata = item["metadata"]
                                if metadata:
                                    success_rate = (metadata.get("success_count", 0) / metadata.get("total_references", 1)) * 100 if metadata.get("total_references", 1) > 0 else 0
                                    current_memory_context += f"Summary: {summary_text}\n  (Success Rate: {success_rate:.2f}%, Total References: {metadata.get("total_references", 0)}, Avg Cost: ${metadata.get("average_cost", 0.0):.4f}, Avg Duration: {metadata.get("average_duration", 0.0):.2f}s)\n"
                                else:
                                    current_memory_context += f"Summary: {summary_text}\n"
                        if initial_retrieved_memories["insights"]:
                            current_memory_context += "Relevant Insights (GMemory):" + "\n".join(initial_retrieved_memories["insights"])

                    tool_descriptions = self.tool_utils_instance.get_tool_descriptions()

                    prompt = format_prompt(
                        round_num=round_num,
                        user_command=command,
                        chat_history=current_chat_history,
                        memory_context=current_memory_context,
                        file_context=current_situation.get("file_context", ""),
                        tool_descriptions=tool_descriptions,
                        current_situation=current_situation,
                        all_plan=all_plan,
                        current_step_in_plan=current_step_in_plan,
                        processing_history=[],
                        last_info=last_info if round_num == 0 else None
                    )

                    llm_response_data = self.llm.generate_content(prompt, task_type="thinking", task_name=task_name)
                    llm_output = llm_response_data["output_content"]
                    self.llm_usage_metrics["total_input_tokens"] += llm_response_data["input_tokens"]
                    self.llm_usage_metrics["total_output_tokens"] += llm_response_data["output_tokens"]
                    self.llm_usage_metrics["total_cost"] += llm_response_data["cost"]
                    self.llm_usage_metrics["task_llm_metrics"].append({
                        "task_type": "thinking",
                        "input_tokens": llm_response_data["input_tokens"],
                        "output_tokens": llm_response_data["output_tokens"],
                        "cost": llm_response_data["cost"]
                    })
                    logger.debug(f"LLM response for thinking: {llm_output}")

                    parsed_output = parse_llm_output(llm_output)
                    logger.info(f"Parsed LLM Output: {parsed_output}")

                    if parsed_output.get("think"):
                        processing_history.append(f"Thought: {parsed_output['think']}")

                    if parsed_output.get("reply"):
                        final_assistant_reply = parsed_output["reply"]
                        logger.info(f"Direct reply generated: {final_assistant_reply}")
                        break
                    elif parsed_output.get("clarify"):
                        final_assistant_reply = parsed_output["clarify"]
                        logger.info(f"Clarifying questions: {final_assistant_reply}")
                        break
                    elif parsed_output.get("all_plan") and not all_plan:
                        all_plan = parsed_output["all_plan"]
                        processing_history.append(f"Action: Generated a new plan.\nPlan:\n{all_plan}")
                        current_step_in_plan = 1
                        current_max_rounds += 50
                        logger.info(f"Detailed plan (all_plan) detected. Starting execution from step {current_step_in_plan}.")
                        continue
                    elif parsed_output.get("revise_plan"):
                        all_plan = parsed_output["revise_plan"]
                        processing_history.append(f"Action: Revised the plan.\nNew Plan:\n{all_plan}")
                        current_step_in_plan = 1
                        logger.info(f"Plan revised. Starting execution from step {current_step_in_plan}.")
                        continue
                    elif parsed_output.get("tool_calls"):
                        logger.info("Tool calls detected.")
                        tasks = parsed_output["tool_calls"]
                        if tasks:
                            execution_result = execute_plan(self, tasks, command, current_situation, userid, openid)
                            
                            if execution_result.get("status") == "success":
                                results.append(execution_result["final_result"])
                                current_tool_output = execution_result["final_result"]
                                
                                action_str = f"Action on round {round_num}: \n# 使用工具："+tasks 
                                observation_str = f"Observation from Tool Execution:\n{current_tool_output}"
                                processing_history.append(f"{action_str}\n\n{observation_str}")
                                current_chat_history += "\nassistant: "+f"{action_str}\n\n{observation_str}"
                                
                                # current_tool_output = "# 使用工具："+tasks +\
                                #     "\n\n#运行结果：\n<tool_result>\n" + current_tool_output+"\n</tool_result>\n"

                                # prompt = format_prompt(
                                #     round_num=round_num,
                                #     user_command=command,
                                #     chat_history=current_chat_history,
                                #     memory_context=current_memory_context,
                                #     file_context=current_situation.get("file_context", ""),
                                #     tool_descriptions=tool_descriptions,
                                #     current_situation=current_situation,
                                #     all_plan=all_plan,
                                #     current_step_in_plan=current_step_in_plan,
                                #     processing_history=processing_history,
                                #     last_info=last_info if round_num == 0 else None,
                                #     tool_result=current_tool_output,
                                # )

                                # llm_response_data = self.llm.generate_content(prompt, task_type="output_result", task_name=task_name)
                                # llm_output = llm_response_data["output_content"]
                                
                                # if all_plan:
                                #     current_step_in_plan += 1
                                #     if current_step_in_plan > len(all_plan):
                                #         logger.info("All steps in the plan have been executed.")
                                #         final_assistant_reply = current_tool_output
                                #         break
                                # else:
                                #     final_assistant_reply = current_tool_output
                                #     break
                            elif execution_result.get("status") == "time_overrun":
                                error_message = execution_result.get("error_message", "Time overrun during plan execution.")
                                failed_step_info = execution_result.get("failed_step", {})
                                logger.warning(f"Plan execution time overrun: {error_message}. Current step: {failed_step_info}")
                                action_str = f"Action on round {round_num}: Tool Calls (Time Overrun)"
                                processing_history.append(action_str)
                                observation_str =f"Observation from Tool Execution (Time Overrun):\nError: {error_message}\nProblematic Step: {failed_step_info.get("objective", "N/A")}\nTool: {failed_step_info.get("tool_name", "N/A")}"
                                processing_history.append(observation_str)
                                current_chat_history += "\nassistant:" + action_str + "\n\n" + observation_str
                                continue
                            else: # Plan execution failed
                                error_message = execution_result.get("error_message", "Unknown error during plan execution.")
                                failed_step_info = execution_result.get("failed_step", {})
                                logger.error(f"Action on round {round_num}: Tool Calls (Failed)")
                                action_str = f"Action on round {round_num}: Tool Calls (Time Overrun)"
                                observation_str =f"Observation from Tool Execution (Failure):\nError: {error_message}\nFailed Step: {failed_step_info.get("objective", "N/A")}\nTool: {failed_step_info.get("tool_name", "N/A")}"
                                processing_history.append(action_str)
                                processing_history.append(observation_str)
                                current_chat_history += "\nassistant:" + action_str + "\n\n" + observation_str
                                continue 
                        else:
                            logger.warning("No tasks found within <tool_calls>.")
                            final_assistant_reply = "Error: No executable tasks found in tool calls."
                            break
                    elif parsed_output.get("memory_queries"):
                        logger.info("Memory retrieval detected.")
                        memory_queries = parsed_output["memory_queries"]
                        
                        combined_semantic_summaries = []
                        combined_insights = []
                        for query in memory_queries:
                            retrieved_memories = self.memory_manager_instance.retrieve_memory(query)
                            self.memory_retrieve_count += 1 # Increment memory retrieve count
                            combined_semantic_summaries.extend(retrieved_memories["semantic_summaries"])
                            combined_insights.extend(retrieved_memories["insights"])

                        current_retrieved_memories = {
                            "semantic_summaries": combined_semantic_summaries,
                            "insights": combined_insights
                        }
                        
                        action_str = f"Action on round {round_num}: Memory Search"
                        observation_str = f"Observation from Memory:\n{current_memory_context}"
                        processing_history.append(f"{action_str}\n{observation_str}")

                        current_chat_history += "\nassistant:" + action_str + "\n\n" + observation_str
                        
                        current_memory_context = ""
                        all_memories = combined_insights + combined_semantic_summaries
                        
                        for mem_item in all_memories:
                            item_size = len(mem_item.encode('utf-8'))
                            if len(current_memory_context.encode('utf-8')) + item_size + len("\n\n".encode('utf-8')) <= config.MAX_MEMORY_CONTEXT_LENGTH:
                                if current_memory_context:
                                    current_memory_context += "\n\n"
                                current_memory_context += mem_item
                            else:
                                logger.warning(f"Memory context limit reached. Skipping further memory items. Current size: {len(current_memory_context.encode('utf-8'))} bytes.")
                                break
                        
                        logger.info(f"Updated memory context after retrieval: {current_memory_context[:200]}...")
                        logger.info(f"Full Current Memory Context after retrieval: {current_memory_context}")
                    else:
                        logger.warning(f"Unrecognized action from LLM: {llm_output}. Treating as direct reply.")
                        final_assistant_reply = llm_output
                        break

                if not final_assistant_reply:
                    if (time.time() - start_time) >= config.TIMEOUT_SECONDS:
                        final_assistant_reply = self._handle_timeout(processing_history)
                    elif round_num >= current_max_rounds -1:
                        final_assistant_reply = self._handle_max_rounds(processing_history)
                    else:
                        if results:
                            summary_content = "\n".join(results)
                            summary_prompt = f"Based on the following execution results, please provide a concise summary or final answer to the user:\n\n{summary_content}\n\nSummary:"
                            try:
                                llm_response = self.llm.generate_content(summary_prompt, task_type="summary")
                                final_assistant_reply = llm_response["output_content"]
                            except Exception as e:
                                logger.error(f"Error summarizing results: {e}", exc_info=True)
                                final_assistant_reply = "任务已完成，但无法生成详细总结。请检查日志获取更多信息。"
                        else:
                            final_assistant_reply = "Could not determine a clear response. Please try again or rephrase your request."
                
                task_end_time = datetime.now()
                total_task_duration = (task_end_time - task_start_time).total_seconds()
                
                self.memory_manager_instance.add_memory(command, all_plan if all_plan else [], results, 
                                                        success=bool(final_assistant_reply), userid=userid, openid=openid, 
                                                        total_duration=total_task_duration, total_cost=self.llm_usage_metrics["total_cost"])
                self.memory_add_count += 1 # Increment memory add count

                self.task_counter += 1
                if self.task_counter >= self.evolution_trigger_threshold:
                    logger.info(f"Task counter reached {self.evolution_trigger_threshold}. Initiating self-evolution task.")
                    self.evolution_manager_instance.initiate_evolution_task(userid=userid, openid=openid, command=command)
                    self.task_counter = 0

                self._save_last_info(userid, {
                    "last_tool_output": current_tool_output if 'current_tool_output' in locals() else "",
                    "last_retrieved_memories": current_retrieved_memories if 'current_retrieved_memories' in locals() else {}
                })

                self.task_states[task_name]["status"] = "completed"
                self.task_states[task_name]["progress"] = 100
                self.task_states[task_name]["result"] = final_assistant_reply
                
                self.agent_metadata_db.increment_run_count(selected_agent_name, success=True) # Mark as success
                return {
                    "task_id": task_name,
                    "final_result": final_assistant_reply,
                    "llm_usage_metrics": self.llm_usage_metrics,
                    "total_task_duration": total_task_duration
                }
            else:
                # Dynamically load and run the selected agent
                agent_module_path = os.path.join(self.base_work_dir, "agents", selected_agent_name, f"{selected_agent_name}.py")
                if not os.path.exists(agent_module_path):
                    raise FileNotFoundError(f"Agent module not found: {agent_module_path}")

                spec = importlib.util.spec_from_file_location(selected_agent_name, agent_module_path)
                if spec is None:
                    raise ImportError(f"Could not load spec for agent: {selected_agent_name}")
                agent_module = importlib.util.module_from_spec(spec)
                sys.modules[selected_agent_name] = agent_module
                spec.loader.exec_module(agent_module)

                # Assuming the agent class name is the capitalized version of the agent_name
                agent_class_name = "Agent"
                if not hasattr(agent_module, agent_class_name):
                    raise AttributeError(f"Agent class '{agent_class_name}' not found in module '{selected_agent_name}'")
                
                AgentClass = getattr(agent_module, agent_class_name)
                # Pass necessary parameters to the agent's constructor
                # This might need to be more dynamic based on actual agent constructors
                agent_instance = AgentClass(base_work_dir=self.base_work_dir, userid=userid, openid=openid) 
                
                # Call the agent's process_command method
                agent_result = agent_instance.process_command(command, current_situation, userid, openid, max_run_steps)
                final_assistant_reply = agent_result.get("final_result", "")
                
                # Update agent run count and success status
                self.agent_metadata_db.increment_run_count(selected_agent_name, success=True) # Mark as success
                return agent_result

        except Exception as e:
            error_message = f"An unexpected error occurred during command processing: {e}"
            logger.error(error_message, exc_info=True)
            full_trace = traceback.format_exc()
            print("完整的堆栈跟踪信息：")
            print(full_trace)
            
            # If an error occurs, mark the selected agent as failed
            self.agent_metadata_db.increment_run_count(selected_agent_name, success=False)

            # Ensure results are saved even on unexpected errors
            save_task_data_func(self, "result_log.txt", f"Error: {error_message}\n\nPartial Results:\nNo results captured.")
            
            # Update task states for the current task
            task_name = current_situation.get("task_name", "unknown_task") # Get task_name if available
            if task_name in self.task_states:
                self.task_states[task_name]["status"] = "failed"
                self.task_states[task_name]["progress"] = 0
                self.task_states[task_name]["error"] = error_message
            
            final_assistant_reply = f"Error: {error_message}"
            return {
                "task_id": current_situation.get("task_name", "unknown_task"),
                "final_result": f"Error: {error_message}",
                "llm_usage_metrics": self.llm_usage_metrics # Return current metrics even on error
            }
        finally:
            # Append chat history at the end of process_command
            if final_assistant_reply:
                self.chat_history_manager.append_chat_history(userid, command, final_assistant_reply)
            else:
                logger.warning("final_assistant_reply was empty or not defined, not appending to chat history.")

    def _save_last_info(self, userid: str, last_info: dict):
        """Saves the last tool output and memory context for a user."""
        if not userid:
            return
        try:
            user_data_dir = os.path.join(self.base_work_dir, "data", userid)
            os.makedirs(user_data_dir, exist_ok=True)
            last_info_path = os.path.join(user_data_dir, "last_info.json")
            with open(last_info_path, 'w', encoding='utf-8') as f:
                json.dump(last_info, f, ensure_ascii=False, indent=2)
            logger.info(f"Saved last_info for user {userid} to {last_info_path}")
        except Exception as e:
            logger.error(f"Error saving last_info for user {userid}: {e}", exc_info=True)

    def _load_last_info(self, userid: str) -> dict:
        """Loads the last tool output and memory context for a user."""
        if not userid:
            return {}
        try:
            last_info_path = os.path.join(self.base_work_dir, "data", userid, "last_info.json")
            if os.path.exists(last_info_path):
                with open(last_info_path, 'r', encoding='utf-8') as f:
                    info = json.load(f)
                    logger.info(f"Loaded last_info for user {userid} from {last_info_path}")
                    return info
            return {}
        except Exception as e:
            logger.error(f"Error loading last_info for user {userid}: {e}", exc_info=True)
            return {}

    def _handle_timeout(self, processing_history: list) -> str:
        """Summarize partial progress after a task timeout.

        Asks the LLM to condense *processing_history* into a short status
        update and embeds it in a user-facing (Chinese) apology message.
        Falls back to a fixed apology string if summarization itself fails.

        Args:
            processing_history: Accumulated per-step log lines for the task.

        Returns:
            A user-facing summary string; never raises.
        """
        logger.warning("Task timed out. Summarizing current progress.")
        summary_prompt = f"The task timed out. Please summarize the following processing history and provide a concise status update or partial result:\n\n{os.linesep.join(processing_history)}\n\nSummary:"
        try:
            llm_response = self.llm.generate_content(summary_prompt, task_type="summary")
            # Single-quote the subscript key: reusing the f-string's own quote
            # character inside a replacement field is a SyntaxError on every
            # Python release before 3.12 (PEP 701).
            return f"抱歉，任务处理超时。根据现有信息，我总结如下：\n{llm_response['output_content']}"
        except Exception as e:
            logger.error(f"Error summarizing on timeout: {e}", exc_info=True)
            return "抱歉，任务处理超时，且无法生成总结。"

    def _handle_max_rounds(self, processing_history: list) -> str:
        """Summarize partial progress when the iteration cap is hit.

        Mirrors ``_handle_timeout``: asks the LLM for a concise summary of
        *processing_history* and wraps it in a user-facing (Chinese) message,
        with a fixed fallback string if summarization fails.

        Args:
            processing_history: Accumulated per-step log lines for the task.

        Returns:
            A user-facing summary string; never raises.
        """
        logger.warning("Reached maximum thinking iterations. Summarizing current progress.")
        summary_prompt = f"The task reached maximum iterations. Please summarize the following processing history and provide a concise status update or partial result:\n\n{os.linesep.join(processing_history)}\n\nSummary:"
        try:
            llm_response = self.llm.generate_content(summary_prompt, task_type="summary")
            # Single-quote the subscript key: reusing the f-string's own quote
            # character inside a replacement field is a SyntaxError on every
            # Python release before 3.12 (PEP 701).
            return f"抱歉，任务处理已达到最大轮次限制。根据现有信息，我总结如下：\n{llm_response['output_content']}"
        except Exception as e:
            logger.error(f"Error summarizing on max rounds: {e}", exc_info=True)
            return "抱歉，任务处理已达到最大轮次限制，且无法生成总结。"

    def shutdown(self):
        """Release external resources held by the agent.

        Currently closes the GMemory connection when GMemory is enabled and
        exposes a ``close`` method; close failures are logged, not raised.
        """
        logger.info("Shutting down RootAgent resources.")
        if not (self.use_gmemory and hasattr(self.gmemory, 'close')):
            # Add other resource cleanup here if necessary (e.g., vector_db)
            return
        try:
            self.gmemory.close()
            logger.info("GMemory connection closed.")
        except Exception as e:
            logger.error(f"Error closing GMemory: {e}", exc_info=True)
        # Add other resource cleanup here if necessary (e.g., vector_db)

    def pause_task(self, task_id: str) -> None:
        """Pause the task identified by *task_id* (delegates to the task status manager)."""
        self.task_status_manager_instance.pause_task(task_id)

    def resume_task(self, task_id: str) -> None:
        """Resume the task identified by *task_id* (delegates to the task status manager)."""
        self.task_status_manager_instance.resume_task(task_id)

    def cancel_task(self, task_id: str) -> None:
        """Cancel the task identified by *task_id* (delegates to the task status manager)."""
        self.task_status_manager_instance.cancel_task(task_id)

    def get_task_status(self, task_id: str):
        """Return the current status of *task_id* as reported by the task status
        manager (exact return shape is defined by ``TaskStatusManager``)."""
        return self.task_status_manager_instance.get_task_status(task_id)
