import importlib
import importlib.util  # explicit: spec_from_file_location is used in execute_plan
import inspect
import json
import os
import re
import sys
import time
import uuid
import xml.etree.ElementTree as ET
from datetime import datetime

from loguru import logger

import config
from root_agent.utils.save_task_data import save_task_data_func

# Sandbox execution is an optional feature gated by config. When it is
# disabled we still bind the name `execute_in_sandbox` to a stub so that
# imports elsewhere keep working; any actual call then fails loudly.
if config.ENABLE_SANDBOX_EXECUTION:
    from tools.builtin.execute_in_sandbox import execute_in_sandbox
else:
    logger.warning("Sandbox execution is disabled. 'execute_in_sandbox' will not be available.")
    def execute_in_sandbox(*args, **kwargs):
        # Stub with the same call shape; raising (not returning an error
        # string) makes accidental use unmistakable.
        raise NotImplementedError("Sandbox execution is disabled in config.py")

class ToolExecutionError(Exception):
    """Base error raised when a tool invocation fails during plan execution."""

class InvalidParametersError(ToolExecutionError):
    """Raised when a tool is given malformed or missing parameters."""

class APIError(ToolExecutionError):
    """Raised when an upstream API call fails while a tool is executing."""

def create_plan(agent, user_goal: str, current_situation: dict, userid: str, openid: str, chat_history: str = "", memory_context: str = "", file_context: str = "") -> list[dict]:
    """Turn a natural-language goal into an executable, step-by-step plan.

    Two LLM stages are used:
      1. A high-level pass that yields either a direct answer, a single
         <task> XML, or a numbered list of objectives.
      2. A refinement pass that converts each objective into one concrete
         task dict (tool, params, expected outcome, retry policy).

    Args:
        agent: Orchestrator exposing ``llm``, ``tool_manager``, ``os_info``
            and the mutable ``llm_usage_metrics`` accumulator dict.
        user_goal: The user's request in natural language.
        current_situation: Context dict for the current run.
        userid: Caller user id (accepted for interface parity; not used here).
        openid: Caller open id (accepted for interface parity; not used here).
        chat_history: Optional prior conversation, injected into the prompt.
        memory_context: Optional recalled memories, injected into the prompt.
        file_context: Optional file excerpts, injected into the prompt.

    Returns:
        list[dict]: One task dict per successfully refined objective; may be
        empty when no usable objectives could be derived.
    """
    logger.info(f"Creating plan for user goal: {user_goal}")

    def _record_usage(llm_data: dict) -> None:
        # Every planning LLM call is accounted identically: global totals
        # plus one per-call record under task_llm_metrics.
        agent.llm_usage_metrics["total_input_tokens"] += llm_data["input_tokens"]
        agent.llm_usage_metrics["total_output_tokens"] += llm_data["output_tokens"]
        agent.llm_usage_metrics["total_cost"] += llm_data["cost"]
        agent.llm_usage_metrics["task_llm_metrics"].append({
            "task_type": "planning",
            "input_tokens": llm_data["input_tokens"],
            "output_tokens": llm_data["output_tokens"],
            "cost": llm_data["cost"]
        })

    tool_descriptions = agent.tool_manager.get_tool_descriptions()

    # Stage 1: generate high-level objectives (or a direct answer).
    current_date = datetime.now().strftime("%Y年%m月%d日 %H:%M:%S")
    current_working_dir = os.getcwd()

    high_level_prompt = f"""Given the user's goal, current situation, chat history, and relevant memories, determine the best course of action.

If the user's goal is simple and can be answered directly (e.g., a factual question, a request for clarification, or a simple command that can be executed with a single tool call), provide the direct answer or the single tool call in the <task> XML format.
If the user's goal is complex and requires multiple steps or further clarification, generate a detailed, step-by-step plan as a numbered list of high-level objectives. If the request is unclear or lacks sufficient information, ask clarifying questions to gather more details before proceeding with a plan.

Current Date and Time: {current_date}
Operating System: {agent.os_info}
Current Working Directory: {current_working_dir}
User Goal: {user_goal}
Chat History: {chat_history}
Memory Context: {memory_context}
File Context: {file_context}

Consider the following:
- If the goal is a simple question, provide the answer directly.
- If the goal is a simple command, provide the <task> XML for a single tool call.
- If the goal is complex, break it down into a numbered list of high-level objectives.
- If the goal is unclear, ask clarifying questions.

Example of direct answer for a simple question:
"The current date is {current_date}."

Example of a single tool call for a simple command:
<task><tool_name>list_directory</tool_name><params><path>./</path></params><expect>A list of files and directories.</expect><dependencies></dependencies><time>1</time><failure_policy>retry 3 times</failure_policy></task>

Example of a plan for a complex goal:
1. Understand the project structure.
2. Identify relevant files for modification.
3. Implement the new feature.
4. Write unit tests for the new feature.

Example of asking clarifying questions for an unclear goal:
"Could you please provide more details about [specific aspect]? For example, what is the desired output format or what specific files should I focus on?"

Your response should either be a direct answer, a single <task> XML, or a numbered list of objectives/clarifying questions.
"""
    llm_response_data = agent.llm.generate_content(high_level_prompt, task_type="planning")
    high_level_response = llm_response_data["output_content"]
    _record_usage(llm_response_data)

    # Prefer an explicit numbered list; otherwise fall back to treating every
    # non-empty line of the response as its own objective.
    numbered_list_matches = re.findall(r'^\s*\d+\.\s*(.*)', high_level_response, re.MULTILINE)
    if numbered_list_matches:
        objectives = [obj.strip() for obj in numbered_list_matches]
    else:
        objectives = [line.strip() for line in high_level_response.split('\n') if line.strip()]

    if not objectives:
        logger.error(f"Failed to generate high-level objectives. LLM Response: {high_level_response}")
        return []

    # Stage 2: refine each objective into a concrete <task> definition.
    plan = []
    for objective in objectives:
        logger.debug(f"Refining objective: {objective}")

        # BUGFIX: the flag lives on the imported config module; the bare name
        # ENABLE_SANDBOX_EXECUTION previously raised NameError at runtime.
        if config.ENABLE_SANDBOX_EXECUTION:
            sandbox_example_text = """Example for `execute_in_sandbox` with complex Python code:
<task><tool_name>execute_in_sandbox</tool_name><params><![CDATA[<code_execution><code_to_execute>import os

file_path = "my_file.txt"
content = "Hello, World! This is a test."

with open(file_path, "w") as f:
    f.write(content)

print(f"File {file_path} created with content: {content}")</code_to_execute><timeout>10</timeout></code_execution>]]></params><expect>A file named 'my_file.txt' is created with the specified content, and a confirmation message is printed.</expect><dependencies></dependencies><time>1</time><failure_policy>retry 3 times</failure_policy></task>"""
        else:
            sandbox_example_text = """# Note : `execute_in_sandbox` is currently disabled. Please avoid using it."""

        # BUGFIX: refine_prompt is an f-string, so the old trailing
        # .format(objective=..., tool_descriptions=...) call was redundant; it
        # only served to collapse the doubled braces in the sandbox example,
        # which now uses single braces directly (final prompt is unchanged).
        refine_prompt = f"""Given the objective, select the best tool and parameters to achieve it.
Objective: {objective}

Available tools:
{tool_descriptions}

{sandbox_example_text}

Respond with a single XML <task> tag containing: <tool_name>, <params>, <expect>, <dependencies>, <time>, and <failure_policy>.
For <params>, provide arguments as nested XML tags.
Specifically, for the 'LLM' tool, the <params> should contain <prompt> and <content> tags, like this: <params><prompt>Your prompt here</prompt><content>Your content here</content></params>.

Example:
if you want call : list_directory("C:/projects/my_project")`

you need output:
<task><tool_name>list_directory</tool_name>
<params><path>C:/projects/my_project</path></params>
<expect>dir list, files or directories in the specified path</expect>
<dependencies></dependencies><time>10</time>
<failure_policy>retry 3 times</failure_policy></task>

---
For other tools, provide arguments as nested XML tags. If there's only one argument, wrap it in a <value> tag.
Example for multiple XML params: <params><path>file.txt</path><content>Hello</content></params>
Example for single XML param: <params><value>some_string_value</value></params>

Example (Tool Use - Screen_shot):
<task>
  <tool_name>Screen_shot</tool_name>
  <params></params>
  <expect>A file path to the saved screenshot is returned.</expect>
  <dependencies></dependencies><time>5</time>
  <failure_policy>retry 3 times</failure_policy>
</task>
"""
        retries = 0
        max_retries = 3
        task_generated = False
        while retries < max_retries and not task_generated:
            llm_response_data = agent.llm.generate_content(refine_prompt, task_type="planning")
            llm_response = llm_response_data["output_content"]
            _record_usage(llm_response_data)
            logger.debug(f"LLM response for objective '{objective}': {llm_response}")

            # The model may wrap the XML in prose; extract the first <task> span.
            xml_match = re.search(r'<task>(.*?)</task>', llm_response, re.DOTALL)
            if not xml_match:
                logger.warning(f"Attempt {retries + 1}/{max_retries}: Failed to generate a valid <task> XML for objective: {objective}. Retrying...")
                retries += 1
                time.sleep(1)
                continue

            xml_string = f"<task>{xml_match.group(1)}</task>"
            try:
                task_elem = ET.fromstring(xml_string)
                task = {
                    "objective": objective,
                    "tool_name": task_elem.findtext("tool_name", "").strip(),
                    # Params stay a raw XML snippet; execute_plan re-parses
                    # them via agent.utils.parse_params.
                    "params": ET.tostring(task_elem.find("params"), encoding='unicode').strip() if task_elem.find("params") is not None else "",
                    "expect": task_elem.findtext("expect", "").strip(),
                    "dependencies": task_elem.findtext("dependencies", "").strip(),
                    "time": task_elem.findtext("time", "").strip(),
                    "failure_policy": task_elem.findtext("failure_policy", "request user").strip(),
                    "new_tool_code": task_elem.findtext("new_tool_code", "").strip()
                }
                plan.append(task)
                task_generated = True
            except ET.ParseError as e:
                logger.warning(f"Attempt {retries + 1}/{max_retries}: Error parsing <task> XML for objective '{objective}': {e}. Retrying...", exc_info=True)
                retries += 1
                time.sleep(1)
            except AttributeError as e:
                logger.warning(f"Attempt {retries + 1}/{max_retries}: Missing attribute in <task> XML for objective '{objective}': {e}. Retrying...", exc_info=True)
                retries += 1
                time.sleep(1)

        if not task_generated:
            logger.error(f"Failed to generate a valid task for objective '{objective}' after {max_retries} attempts. Skipping this objective.")

    logger.info(f"Successfully created plan with {len(plan)} steps.")
    logger.debug(f"Generated plan: {plan!r}")
    return plan

def _record_llm_usage(agent, llm_data: dict, task_type: str) -> None:
    """Fold one LLM call's token/cost accounting into the agent-wide metrics."""
    agent.llm_usage_metrics["total_input_tokens"] += llm_data["input_tokens"]
    agent.llm_usage_metrics["total_output_tokens"] += llm_data["output_tokens"]
    agent.llm_usage_metrics["total_cost"] += llm_data["cost"]
    agent.llm_usage_metrics["task_llm_metrics"].append({
        "task_type": task_type,
        "input_tokens": llm_data["input_tokens"],
        "output_tokens": llm_data["output_tokens"],
        "cost": llm_data["cost"]
    })


def _build_failure_result(agent, error_message: str, step: dict, results: list) -> dict:
    """Uniform failure payload handed back to the caller for re-planning."""
    return {
        "status": "failed",
        "error_message": error_message,
        "failed_step": step,
        "current_results": results,
        "llm_usage_metrics": agent.llm_usage_metrics
    }


def execute_plan(agent, plan: list[dict], user_command: str, current_situation: dict, userid: str, openid: str) -> dict:
    """Execute a plan produced by create_plan, step by step.

    Each step is dispatched either to the special ``create_new_tool`` flow
    (save code, smoke-test it, register it) or to the ToolAgent via a JSON
    request envelope, with per-step retries governed by the step's
    failure_policy and an LLM-based outcome evaluation.

    Args:
        agent: Orchestrator exposing ``tool_manager``, ``tool_agent``,
            ``llm``, ``utils``, ``memory_manager_instance`` and the mutable
            ``llm_usage_metrics`` dict.
        plan: Task dicts as produced by create_plan.
        user_command: Original user command, stored in memory on success.
        current_situation: Context dict merged into each tool request.
        userid: Caller user id, forwarded to tools that accept it.
        openid: Caller open id, forwarded to tools that accept it.

    Returns:
        dict: ``{"status": "success", ...}`` when every step passed, or a
        failure payload (``"failed"`` / ``"time_overrun"``) identifying the
        step that stopped execution.
    """
    logger.info(f"Executing plan with {len(plan)} steps.")
    results = []
    plan_start_time = time.time()
    try:
        # Total estimated time drives the overrun check below.
        total_estimated_time = sum(float(step.get("time", 0)) for step in plan)
    except (ValueError, TypeError):
        total_estimated_time = 300  # fall back to a 5-minute budget

    for i, step in enumerate(plan):
        objective = step.get("objective", "")
        tool_name = step.get("tool_name", "")
        params = step.get("params", "")
        expect = step.get("expect", "")
        failure_policy = step.get("failure_policy", "request user")

        logger.info(f"--- Starting Step {i+1}/{len(plan)} ---")
        logger.info(f"Step {i+1} Details: Objective: {objective}, Tool: {tool_name}, Params: {params!r}")

        # Dynamic time management: compare actual elapsed time against a
        # linear projection of the estimate; a 1.5x overrun asks the caller
        # to re-plan instead of grinding on.
        current_elapsed_time = time.time() - plan_start_time
        expected_elapsed_time = (i + 1) / len(plan) * total_estimated_time

        if total_estimated_time > 0 and current_elapsed_time > expected_elapsed_time * 1.5:
            logger.warning(f"Time overrun detected for step {i+1}. Current elapsed: {current_elapsed_time:.2f}s, Expected: {expected_elapsed_time:.2f}s. Requesting re-evaluation.")
            return {
                "status": "time_overrun",
                "error_message": "Plan execution is taking longer than expected. Re-evaluation needed.",
                "failed_step": step,  # current step is the re-evaluation point
                "current_results": results,
                "llm_usage_metrics": agent.llm_usage_metrics
            }

        # BUGFIX: the lookup was previously performed twice behind two
        # stacked identical `if tool_func:` checks; a single lookup with a
        # guard clause is equivalent.
        tool_func = agent.tool_manager.get_tool_func(tool_name)
        if not tool_func:
            error_message = f"Tool {tool_name} not found for step {i+1}."
            logger.warning(error_message)
            # NOTE(review): a plain string (not the usual dict) is appended
            # here; kept as-is for backward compatibility with consumers of
            # current_results.
            results.append(f"Step {i+1} ({objective}): Error - Tool {tool_name} not found.")
            return _build_failure_result(agent, error_message, step, results)

        if tool_name == "create_new_tool":
            logger.info("Attempting to create a new tool.")
            # Parse parameters for create_new_tool
            parsed_params = agent.utils.parse_params(params)
            new_tool_code = parsed_params.get("new_tool_code", "")
            new_tool_name = parsed_params.get("tool_name", "")
            new_tool_description = parsed_params.get("description", "")
            new_tool_usage = parsed_params.get("usage", "")
            new_tool_arguments_str = parsed_params.get("arguments", "[]")  # Expecting a JSON string of arguments

            if not new_tool_code or not new_tool_name:
                error_message = "Missing new_tool_code or tool_name for create_new_tool."
                logger.error(error_message)
                return _build_failure_result(agent, error_message, step, results)

            try:
                new_tool_arguments = json.loads(new_tool_arguments_str)
            except json.JSONDecodeError:
                error_message = f"Invalid JSON for new_tool_arguments: {new_tool_arguments_str}"
                logger.error(error_message)
                return _build_failure_result(agent, error_message, step, results)

            # 1. Save the new tool code to tools/generated/
            tool_file_path = os.path.join("tools", "generated", f"{new_tool_name}.py")
            try:
                # Robustness: ensure the target directory exists before writing.
                os.makedirs(os.path.dirname(tool_file_path), exist_ok=True)
                with open(tool_file_path, "w", encoding="utf-8") as f:
                    f.write(new_tool_code)
                logger.info(f"New tool code saved to {tool_file_path}")
            except Exception as e:
                error_message = f"Failed to save new tool code: {e}"
                logger.error(error_message, exc_info=True)
                return _build_failure_result(agent, error_message, step, results)

            # 2. Generate and run a test for the new tool
            test_code_prompt = f"""Generate a simple Python test script for the following tool.
The test should call the tool with example parameters and print its output.
Assume the tool is defined in a file named '{new_tool_name}.py' and its main function is '{new_tool_name}'.

Tool Code:
```python
{new_tool_code}
```

Test Script:
```python
# Import the tool function
from tools.generated.{new_tool_name} import {new_tool_name}

# Example usage (replace with actual example parameters based on tool's arguments)
# For example, if the tool takes 'path' and 'content' as arguments:
# result = {new_tool_name}(path='test.txt', content='Hello World')
# print(result)

# If the tool takes no arguments:
# result = {new_tool_name}()
# print(result)

# If the tool takes a single argument 'value':
# result = {new_tool_name}(value='example_value')
# print(result)

# IMPORTANT: Replace the example usage below with actual parameters based on the tool's arguments.
# If the tool has arguments, you MUST provide example values for them.
# If the tool has no arguments, call it without arguments.

# Placeholder for actual test call based on arguments
"""
            # Dynamically generate example parameters for the test script
            example_params_str = ""
            if new_tool_arguments:
                param_list = []
                for arg in new_tool_arguments:
                    # Simple heuristic for example values
                    if arg['type'] == 'str':
                        example_value = f"'example_{arg['name']}'"
                    elif arg['type'] == 'int':
                        example_value = "123"
                    elif arg['type'] == 'bool':
                        example_value = "True"
                    else:
                        example_value = "None"  # Fallback
                    param_list.append(f"{arg['name']}={example_value}")
                example_params_str = ", ".join(param_list)

            test_code_prompt += f"result = {new_tool_name}({example_params_str})\nprint(result)\n```"

            test_script_response = agent.llm.generate_content(test_code_prompt, task_type="tool_test_generation")
            # Consistency fix: this LLM call was previously excluded from
            # usage accounting while all planning calls are counted.
            _record_llm_usage(agent, test_script_response, "tool_test_generation")
            test_script_content = test_script_response["output_content"]

            # Extract code block from LLM response
            test_code_match = re.search(r'```python\n(.*?)```', test_script_content, re.DOTALL)
            if test_code_match:
                test_script_content = test_code_match.group(1).strip()
            else:
                logger.warning("No Python code block found in test script generation. Using raw LLM output.")

            logger.info("Running test for new tool...")
            test_result = agent.utils.run_python_code(test_script_content)

            if test_result["error"]:
                error_message = f"Test for new tool '{new_tool_name}' failed: {test_result['stderr']}"
                logger.error(error_message)
                # Clean up the generated tool file if test fails
                os.remove(tool_file_path)
                return _build_failure_result(agent, error_message, step, results)

            logger.info(f"Test for new tool '{new_tool_name}' passed. Output: {test_result['stdout']}")
            # 3. Import the new module and register the tool with ToolManager
            try:
                spec = importlib.util.spec_from_file_location(new_tool_name, tool_file_path)
                module = importlib.util.module_from_spec(spec)
                sys.modules[new_tool_name] = module
                spec.loader.exec_module(module)
                new_tool_func = getattr(module, new_tool_name)

                agent.tool_manager.register_new_tool(
                    new_tool_name,
                    new_tool_func,
                    new_tool_description,
                    new_tool_usage,
                    new_tool_arguments
                )
                tool_output = f"New tool '{new_tool_name}' created, tested, and registered successfully. Test Output: {test_result['stdout']}"
                results.append({"tool_name": "create_new_tool", "output": tool_output})
                step_successful = True
            except Exception as e:
                error_message = f"Failed to register new tool '{new_tool_name}': {e}"
                logger.error(error_message, exc_info=True)
                os.remove(tool_file_path)  # Clean up
                return _build_failure_result(agent, error_message, step, results)
        else:
            retries = 0
            max_retries = 0
            if "retry" in failure_policy:
                try:
                    max_retries = int(re.search(r'retry (\d+) times', failure_policy).group(1))
                except AttributeError:
                    # Policy mentions retry but not in the expected form.
                    max_retries = 0

            step_successful = False
            tool_output = ""
            error_message = ""

            while retries <= max_retries:
                try:
                    final_params_for_tool_agent = agent.utils.parse_params(params)
                    if isinstance(final_params_for_tool_agent, dict):
                        # Only inject identity args the tool actually accepts.
                        tool_signature = inspect.signature(tool_func)
                        if 'userid' in tool_signature.parameters:
                            final_params_for_tool_agent['userid'] = userid
                        if 'openid' in tool_signature.parameters:
                            final_params_for_tool_agent['openid'] = openid

                    request_obj = {
                        "protocol_version": "1.0",
                        "task_id": str(uuid.uuid4()),
                        "requester": "RootAgent",
                        "target_agent": "ToolAgent",
                        "task": {
                            "objective": objective,
                            "tool_name": tool_name,
                            "params": final_params_for_tool_agent,
                            "context": {
                                "userid": userid,
                                "openid": openid,
                                **current_situation
                            }
                        }
                    }
                    logger.info(f"Tool Call: {tool_name}")
                    logger.info(f"Tool Input Parameters: {final_params_for_tool_agent!r}")
                    tool_agent_response_json = agent.tool_agent.process_command(json.dumps(request_obj))
                    tool_agent_response = json.loads(tool_agent_response_json)

                    if tool_agent_response.get("status") == "success":
                        tool_output = tool_agent_response.get("result", {}).get("output", "")
                        logger.info(f"Tool Output Extracted: {tool_output!r}")
                    else:
                        error_message = tool_agent_response.get("result", {}).get("error_message", "Unknown error from ToolAgent")
                        raise ToolExecutionError(error_message)

                    results.append({"tool_name": tool_name, "output": tool_output})

                    # Let the LLM judge whether the output satisfies the
                    # step's stated expectation.
                    evaluation_prompt = f"""Given the following tool execution output and the expected outcome, determine if the step was successful.
Output: {tool_output}
Expected Outcome: {expect}
Was the step successful? Respond with 'YES' or 'NO'."""
                    evaluation_response_data = agent.llm.generate_content(evaluation_prompt, task_type="evaluation")
                    # Consistency fix: evaluation calls now contribute to
                    # usage metrics like every other LLM call.
                    _record_llm_usage(agent, evaluation_response_data, "evaluation")
                    evaluation_response = evaluation_response_data["output_content"].strip().upper()

                    if "YES" in evaluation_response:
                        step_successful = True
                        logger.info(f"Step {i+1} ({objective}): Expected outcome met.")
                        break
                    error_message = f"Step {i+1} ({objective}): Expected outcome not met. LLM evaluation: {evaluation_response}"
                    logger.error(error_message)
                    retries += 1
                    if retries > max_retries:
                        # Retries exhausted: report failure for this step.
                        return _build_failure_result(agent, error_message, step, results)
                except Exception as e:
                    error_message = f"Error executing tool {tool_name}: {e}"
                    logger.error(error_message, exc_info=True)
                    retries += 1
                    if retries > max_retries:
                        # Retries exhausted: report failure for this step.
                        return _build_failure_result(agent, error_message, step, results)

        if not step_successful:
            # Fallback guard; normally one of the returns above fires first.
            return _build_failure_result(agent, f"Step {i+1} ({objective}) failed after all retries.", step, results)

    # All steps completed successfully: remember the run and persist a log.
    agent.memory_manager_instance.add_memory(user_command, plan, results, success=True, userid=userid, openid=openid)

    formatted_results = [f"Tool: {res['tool_name']}\nOutput: {res['output']}" for res in results]

    save_task_data_func(agent, "result_log.txt", "\n".join(formatted_results))
    return {
        "status": "success",
        # NOTE(review): os.linesep here vs "\n" in the log file above looks
        # inconsistent - confirm whether platform-specific joining is intended.
        "final_result": f"Plan created and executed:\n{os.linesep.join(formatted_results)}",
        "llm_usage_metrics": agent.llm_usage_metrics
    }