import json
import os
import re
import shutil
import subprocess
import sys
from datetime import datetime

from loguru import logger

from root_agent.llm import LLMInterface
from tools.builtin.tools_func import web_search
from tools.builtin.web_fetch import _load_settings

class SelfEvolutionAgent:
    def __init__(self):
        path = os.getcwd() 
        self.settings = _load_settings()
        self.evolution_test_dir = path + "/EvolutionTest/"
        self.old_versions_dir = path + "/old_versions/"
        self.revolution_log_path = path + "/revolution_log.md"
        self.llm = LLMInterface()
        self._current_status = {} # Initialize current status

        os.makedirs(self.evolution_test_dir, exist_ok=True)
        os.makedirs(self.old_versions_dir, exist_ok=True)

    def _clean_llm_code_output(self, code_string: str) -> str:
        """
        Cleans the code output from the LLM and validates its syntax.
        Returns the cleaned code if valid, otherwise returns None.
        """
        cleaned_code = code_string.strip()
        code_match = re.search(r'```python\n(.*?)```', cleaned_code, re.DOTALL)
        if code_match:
            cleaned_code = code_match.group(1).strip()
        
        try:
            compile(cleaned_code, '<string>', 'exec')
            return cleaned_code
        except SyntaxError as e:
            print(f"Syntax error in LLM generated code: {e}")
            return None

    def _analyze_revolution_log(self) -> str:
        """
        Analyzes the revolution log to identify patterns and suggest improvements.
        """
        if not os.path.exists(self.revolution_log_path):
            return "Revolution log not found. No analysis to perform."

        with open(self.revolution_log_path, 'r') as f:
            log_content = f.read()

        analysis_prompt = f"""Analyze the following revolution log and identify key patterns.
Focus on:
- Frequently failing goals.
- Goals that consistently lead to successful benchmarks.
- Common reasons for benchmark failures.
- Suggestions for future evolution goals based on these patterns.

Log Content:
{log_content}

Analysis Summary:
"""
        analysis = self.llm.generate_content(analysis_prompt, task_type="analysis")
        return analysis.strip()

    def establish_evolution_goal(self, predefined_goal=None, insights: list[str] = None):
        """
        Identifies areas for optimization or new skills to acquire.
        Uses web search, LLM, and insights from GMemory to dynamically propose evolution goals.
        """
        log_analysis = self._analyze_revolution_log()
        if predefined_goal:
            goal = predefined_goal
        elif insights:
            prompt = f'''Based on the following insights from past tasks and analysis of the revolution log, propose a concise and actionable evolution goal.
Insights:
- {"\n- ".join(insights)}

Revolution Log Analysis:
{log_analysis}

Proposed Goal:'''
            goal = self.llm.generate_content(prompt, task_type="planning").strip()
            print(f"LLM proposed goal based on insights and log analysis: {goal}")
        else:
            print("Dynamically establishing evolution goal...")
            search_query = "latest advancements in AI agent development OR common pain points in LLM agents"
            search_results = web_search(query=search_query,userid=self.settings.get("userid"), openid=self.settings.get("openid"))
            
            web_content = ""

            # Safely extract the snippet
            results_list = search_results.get("google_web_search_response", {}).get("results", [])
            if results_list:
                web_content = results_list[0].get("snippet", "")

            if web_content:
                prompt = f"Based on the following web search results, propose a concise and actionable evolution goal for an AI assistant like SAIA. Focus on optimization or addressing common challenges:\n\n{web_content}\n\nProposed Goal:"
                llm_proposed_goal = self.llm.generate_content(prompt, task_type="planning")
                goal = llm_proposed_goal.strip()
                print(f"LLM proposed goal: {goal}")
            else:
                goal = "Optimize SAIA's planning accuracy based on recent trends."
                print(f"No relevant search results or an error occurred. Defaulting to goal: {goal}")

        print(f"Evolution Goal Established: {goal}")
        self._current_status["goal"] = goal
        self._current_status["status"] = "Goal Established"
        return goal


    def gather_information(self, goal):
        """
        Collects relevant knowledge and resources for the evolution goal.
        Uses web search and LLM to summarize information.
        """
        print(f"Gathering information for goal: '{goal}'...")
        search_query = f"information about {goal}"
        search_results = web_search(query=search_query,userid=self.settings.get("userid"), openid=self.settings.get("openid"))

        gathered_info = ""
        web_content = ""
 
        # Safely extract the snippet
        results_list = search_results.get("google_web_search_response", {}).get("results", [])
        if results_list:
            web_content = results_list[0].get("snippet", "")

        if web_content:
            prompt = f"Summarize the following information and extract key insights relevant to the goal '{goal}':\n\n{web_content}\n\nSummary and Key Insights:"
            llm_summary = self.llm.generate_content(prompt, task_type="planning")
            gathered_info = llm_summary.strip()
            print(f"LLM summarized information: {gathered_info}")
        else:
            gathered_info = f"No specific information found for '{goal}' or an error occurred during search."
            print(gathered_info)
        self._current_status["gathered_info"] = gathered_info
        self._current_status["status"] = "Information Gathered"
        return gathered_info

    def conduct_sandbox_experiment(self, goal, gathered_info, file_to_modify=None):
        """
        Creates an isolated environment to safely experiment with code modifications.
        Can create a new file, or modify an existing one (either from the main codebase
        or from a previous experiment). Includes robust error handling for file operations.
        """
        print(f"Conducting sandbox experiment for goal: '{goal}'...")
        experiment_id = datetime.now().strftime("%Y%m%d_%H%M%S")
        experiment_dir = os.path.join(self.evolution_test_dir, f"experiment_{experiment_id}")
        
        try:
            os.makedirs(experiment_dir, exist_ok=True)
        except OSError as e:
            return {"success": False, "path": experiment_dir, "message": f"Failed to create experiment directory: {e}"}

        # Heuristic to decide action based on goal
        is_new_file_task = any(keyword in goal.lower() for keyword in ["create", "integrate", "test new", "implement"])

        if is_new_file_task and not file_to_modify:
            # --- Create a new file for the experiment ---
            new_file_name = "test_integration.py"
            sandbox_file_path = os.path.join(experiment_dir, new_file_name)
            
            code_generation_prompt = f"""Based on the evolution goal: '{goal}' and the following gathered information:
'{gathered_info}'

Write a complete, executable, non-interactive Python script to achieve this goal.
The script should be self-contained, ready to run, and must not require user input.

Python Script:
"""
            generated_code = self.llm.generate_content(code_generation_prompt, task_type="code_generation")
            
            generated_code = self._clean_llm_code_output(generated_code)

            if not generated_code:
                return {"success": False, "path": experiment_dir, "message": "LLM generated syntactically incorrect code."}

            try:
                with open(sandbox_file_path, 'w', encoding='utf-8') as f:
                    f.write(generated_code)
            except IOError as e:
                return {"success": False, "path": experiment_dir, "message": f"Error writing to sandbox file: {e}"}
            
            print(f"Generated new script for experiment: {sandbox_file_path}")
            return {
                "success": True,
                "path": experiment_dir,
                "modified_file_path": sandbox_file_path,
                "original_source_path": None,
                "is_new_file": True,
                "message": "Sandbox experiment completed by creating a new test script."
            }
        else:
            # --- Modify an existing file ---
            if file_to_modify:
                source_file_path = file_to_modify
                target_file_name = os.path.basename(source_file_path)
            else:
                target_file_name = "rootAgent.py"
                source_file_path = os.path.join(os.getcwd(), "root_agent", target_file_name)

            sandbox_file_path = os.path.join(experiment_dir, target_file_name)

            try:
                shutil.copy(source_file_path, sandbox_file_path)
                print(f"Copied '{source_file_path}' to sandbox: '{sandbox_file_path}'")
            except (FileNotFoundError, OSError) as e:
                return {"success": False, "path": experiment_dir, "message": f"Error copying file to sandbox: {e}"}

            try:
                with open(sandbox_file_path, 'r', encoding='utf-8') as f:
                    original_code = f.read()
            except IOError as e:
                return {"success": False, "path": experiment_dir, "message": f"Error reading sandbox file: {e}"}

            code_modification_prompt = f"""Based on the evolution goal: '{goal}' and the following gathered information:
'{gathered_info}'

Modify the following Python code to achieve the goal. The resulting script must be non-interactive.
Provide the FULL modified code, not just the changed parts.
Ensure the code is complete, syntactically correct, and all strings are properly closed.

Original Code from '{target_file_name}':
```python
{original_code}
```

Modified Code:
"""
            modified_code = self.llm.generate_content(code_modification_prompt, task_type="code_generation")

            modified_code = self._clean_llm_code_output(modified_code)

            if not modified_code:
                return {"success": False, "path": experiment_dir, "message": "LLM generated syntactically incorrect code for modification."}

            try:
                with open(sandbox_file_path, 'w', encoding='utf-8') as f:
                    f.write(modified_code)
            except IOError as e:
                return {"success": False, "path": experiment_dir, "message": f"Error writing to sandbox file: {e}"}

            print(f"Modified code written to sandbox file: {sandbox_file_path}")

            self._current_status["experiment_results"] = {
                "success": True,
                "path": experiment_dir,
                "modified_file_path": sandbox_file_path,
                "original_source_path": source_file_path,
                "is_new_file": False,
                "message": "Sandbox experiment completed with direct code modification."
            }
            self._current_status["status"] = "Sandbox Experiment Conducted"
            return self._current_status["experiment_results"]

    def run_benchmark_tests(self, experiment_results):
        """
        Evaluates the code in the sandbox by executing it as a script.
        This is a generalized approach for any .py file.
        """
        print(f"Running benchmark tests for experiment in: {experiment_results['path']}...")
        if not experiment_results.get("success"):
            print("Benchmark tests skipped: Sandbox experiment failed.")
            return {"passed": False, "details": "Sandbox experiment failed."}

        modified_file_path = experiment_results['modified_file_path']
        
        # Define the path to the benchmark script
        benchmark_script_path = os.path.join(self.evolution_test_dir, "benchmark_test_list_files.py")

        # --- Run the baseline benchmark ---
        baseline_output = ""
        try:
            baseline_result = subprocess.run(
                ["python", benchmark_script_path],
                capture_output=True,
                text=True,
                check=True,
                timeout=60
            )
            baseline_output = baseline_result.stdout.strip()
            print(f"Baseline benchmark output:\n{baseline_output}")
        except Exception as e:
            error_message = f"Baseline benchmark failed: {e}"
            print(error_message)
            return {"passed": False, "details": error_message}

        # --- Run the experiment code and compare ---
        experiment_output = ""
        try:
            experiment_result = subprocess.run(
                ["python", modified_file_path],
                capture_output=True,
                text=True,
                check=True,
                timeout=60
            )
            experiment_output = experiment_result.stdout.strip()
            print(f"Experiment code output:\n{experiment_output}")
        except subprocess.CalledProcessError as e:
            error_message = f"Experiment failed: Script execution failed with exit code {e.returncode}.\nStderr:\n{e.stderr}"
            print(error_message)
            return {"passed": False, "details": error_message}
        except subprocess.TimeoutExpired as e:
            error_message = f"Experiment failed: Script execution timed out after {e.timeout} seconds."
            print(error_message)
            return {"passed": False, "details": error_message}
        except Exception as e:
            error_message = f"Experiment failed: An unexpected error occurred during script execution: {e}"
            print(error_message)
            import traceback
            return {"passed": False, "details": f"{error_message}\n{traceback.format_exc()}"}

        # --- Compare results ---
        if experiment_output == baseline_output:
            details = f"Benchmark passed: Experiment output matches baseline output.\nBaseline:\n{baseline_output}\nExperiment:\n{experiment_output}"
            print(details)
            self._current_status["benchmark_results"] = {"passed": True, "details": details}
            self._current_status["status"] = "Benchmark Tests Passed"
            return self._current_status["benchmark_results"]
        else:
            details = f"Benchmark failed: Experiment output does NOT match baseline output.\nBaseline:\n{baseline_output}\nExperiment:\n{experiment_output}"
            print(details)
            return {"passed": False, "details": details}

    def deploy_and_archive(self, goal, experiment_results, benchmark_results):
        """
        Integrates successful changes into the main codebase and archives old versions.
        If the experiment was to create a new file, it only logs the success.
        """
        if not benchmark_results.get("passed"):
            print("Deployment skipped: Benchmarks did not pass.")
            return

        # If the experiment was just creating a new test file, no deployment is needed.
        if experiment_results.get("is_new_file"):
            print("Deployment skipped: Experiment was for creating a new file, no changes to deploy to main codebase.")
            log_entry = f"""## Evolution Log: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Goal**: {goal}
- **Status**: SUCCESS (Benchmark Passed)
- **Details**: {benchmark_results['details']}
- **Action**: Sandbox test successful for a newly created file. No deployment action taken.
---
"""
            with open(self.revolution_log_path, "a", encoding='utf-8') as f:
                f.write(log_entry)
            print(f"Updated revolution log for new file experiment: {self.revolution_log_path}")
        self._current_status["deployment_status"] = "Skipped (New File Experiment)"
        self._current_status["status"] = "Deployment Skipped"
        return

        print(f"Deploying changes for goal: '{goal}'...")

        source_to_deploy = experiment_results["modified_file_path"]
        original_destination = experiment_results["original_source_path"]

        # --- Backup the original file ---
        backup_dir = os.path.join(self.old_versions_dir, datetime.now().strftime("backup_%Y%m%d_%H%M%S"))
        os.makedirs(backup_dir, exist_ok=True)
        
        # Check if original_destination is valid before creating backup path
        if not original_destination or not isinstance(original_destination, str):
             print(f"Error: Invalid original destination path provided for backup.")
             return

        backup_file_path = os.path.join(backup_dir, os.path.basename(original_destination) + ".bak")

        try:
            shutil.copyfile(original_destination, backup_file_path)
            print(f"Backed up '{original_destination}' to '{backup_file_path}'")
        except FileNotFoundError:
            print(f"Warning: Original file '{original_destination}' not found for backup. Skipping.")
        except Exception as e:
            print(f"Error backing up original file: {e}")
            return # Do not proceed with deployment if backup fails

        # --- Deploy new code (copy modified file from sandbox) ---
        deployment_successful = False
        try:
            shutil.copyfile(source_to_deploy, original_destination)
            print(f"Deployed modified code from '{source_to_deploy}' to '{original_destination}'")
            deployment_successful = True
        except Exception as e:
            print(f"Error deploying new code: {e}")
            print("Attempting to rollback...")
            self._rollback(backup_file_path, original_destination)
            return

        # --- Git operations ---
        if deployment_successful:
            try:
                subprocess.run(["git", "add", original_destination], check=True)
                commit_message = f"feat(evolution): {goal}"
                subprocess.run(["git", "commit", "-m", commit_message], check=True)
                print(f"Committed changes to git with message: '{commit_message}'")
            except (subprocess.CalledProcessError, FileNotFoundError) as e:
                print(f"Git operation failed: {e}. Please commit changes manually.")

        # --- Update revolution_log.md ---
        log_entry = f"""## Evolution Log: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Goal**: {goal}
- **Status**: {'SUCCESS' if deployment_successful else 'FAILURE'}
- **Details**: {benchmark_results['details']}
- **Action**: Deployed changes from '{source_to_deploy}' to '{original_destination}'. Original file backed up to '{backup_file_path}'.
---
"""
        with open(self.revolution_log_path, "a", encoding='utf-8') as f:
            f.write(log_entry)
        print(f"Updated revolution log: {self.revolution_log_path}")
        self._current_status["deployment_status"] = {'SUCCESS' if deployment_successful else 'FAILURE'}
        self._current_status["status"] = "Deployment Complete"

    def _rollback(self, backup_path, original_path):
        print(f"Rolling back changes from {backup_path} to {original_path}...")
        try:
            shutil.copyfile(backup_path, original_path)
            print("Rollback successful.")
        except Exception as e:
            print(f"Error during rollback: {e}")

    def self_reflect(self, evolution_goal, benchmark_results, gathered_info):
        """
        Analyzes the results of the evolution cycle and generates lessons learned or future tasks.
        """
        print("Conducting self-reflection...")
        log_analysis = self._analyze_revolution_log()
        reflection_prompt = f"""Based on the following evolution cycle and historical analysis:

Evolution Goal: {evolution_goal}
Benchmark Results: {benchmark_results}
Gathered Information: {gathered_info}
Historical Analysis from revolution_log.md:
{log_analysis}

Analyze the outcome and provide:
1. Lessons Learned (what worked, what didn't, why)
2. Potential Future Evolution Tasks (specific, actionable tasks to further improve SAIA).
   For "Potential Future Evolution Tasks", provide them in JSON format, as a list of objects, each with "objective", "priority" (High, Medium, Low), "estimated_effort" (e.g., "1 hour", "1 day"), and "dependencies" (list of strings).
   Example:
   ```json
   [
     {{
       "objective": "Improve planning accuracy for complex tasks",
       "priority": "High",
       "estimated_effort": "3 days",
       "dependencies": ["Enhanced memory retrieval"]
     }},
     {{
       "objective": "Integrate new web scraping tool",
       "priority": "Medium",
       "estimated_effort": "1 day",
       "dependencies": []
     }}
   ]
   ```
   For "Lessons Learned", provide them in Markdown format.
"""
        reflection_output = self.llm.generate_content(reflection_prompt, task_type="reflection")
        
        lessons_learned_match = re.search(r'Lessons Learned:\n(.*?)Potential Future Evolution Tasks:', reflection_output, re.DOTALL)
        lessons_learned = lessons_learned_match.group(1).strip() if lessons_learned_match else "No specific lessons learned provided."

        tasks_json_match = re.search(r'```json\n(.*?)```', reflection_output, re.DOTALL)
        evolution_tasks = []
        if tasks_json_match:
            try:
                evolution_tasks = json.loads(tasks_json_match.group(1).strip())
                # Store evolution tasks to a file
                evolution_tasks_file = os.path.join(self.evolution_test_dir, "evolution_tasks.json")
                with open(evolution_tasks_file, "w") as f:
                    json.dump(evolution_tasks, f, indent=2)
                print(f"Evolution tasks saved to: {evolution_tasks_file}")
            except json.JSONDecodeError as e:
                print(f"Error decoding evolution tasks JSON: {e}")
        else:
            print("No structured evolution tasks found in reflection output.")

        reflection_log_entry = f"\n## Self-Reflection for Evolution Cycle: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
        reflection_log_entry += f"**Evolution Goal**: {evolution_goal}\n"
        reflection_log_entry += f"**Benchmark Status**: {'PASSED' if benchmark_results['passed'] else 'FAILED'}\n"
        reflection_log_entry += f"**Details**: {benchmark_results['details']}\n"
        reflection_log_entry += f"\n### Lessons Learned:\n{lessons_learned}\n\n"
        if evolution_tasks:
            reflection_log_entry += f"### Potential Future Evolution Tasks:\n```json\n{json.dumps(evolution_tasks, indent=2)}\n```\n\n"
        else:
            reflection_log_entry += f"### Potential Future Evolution Tasks:\nNo structured tasks generated.\n\n"

        with open(self.revolution_log_path, "a") as f:
            f.write(reflection_log_entry)
        print(f"Self-reflection logged to: {self.revolution_log_path}")
        self._current_status["reflection_results"] = {
            "lessons_learned": lessons_learned,
            "evolution_tasks": evolution_tasks
        }
        self._current_status["status"] = "Self-Reflection Complete"

    def process_command(self, request_json: str) -> str:
        """
        Orchestrates the entire self-evolution process.
        """
        logger.info(f"SelfEvolutionAgent received request: {request_json}")
        
        try:
            request = json.loads(request_json)
            task = request.get("task", {})
            goal = task.get("goal")
            insights = task.get("insights")
            file_to_modify = task.get("file_to_modify")
            task_id = request.get("task_id")
        except json.JSONDecodeError as e:
            return f"Error: Invalid JSON format in request_json: {e}"
        except Exception as e:
            return f"Error processing request_json: {e}"

        print("\n--- Starting Self-Evolution Process ---")
        self._current_status = {
            "status": "Starting",
            "timestamp": datetime.now().isoformat(),
            "goal": None,
            "gathered_info": None,
            "experiment_results": None,
            "benchmark_results": None,
            "deployment_status": None,
            "reflection_results": None
        }

            
        
        evolution_goal = self.establish_evolution_goal(goal, insights)
        gathered_info = self.gather_information(evolution_goal)
        experiment_results = self.conduct_sandbox_experiment(evolution_goal, gathered_info, file_to_modify)
        benchmark_results = self.run_benchmark_tests(experiment_results)
        self.deploy_and_archive(evolution_goal, experiment_results, benchmark_results)
        self.self_reflect(evolution_goal, benchmark_results, gathered_info)
        self._current_status["status"] = "Finished"
        self._current_status["timestamp"] = datetime.now().isoformat()
        print("--- Self-Evolution Process Finished ---\n")

    def get_status(self):
        """Returns the current evolution status."""
        return self._current_status

if __name__ == "__main__":
    agent = SelfEvolutionAgent()
    agent.run_evolution(goal="Improve SAIA's code generation accuracy.")
