import os
import paramiko
import json
import logging
import re
from io import StringIO
import time
import shutil
import glob
from pathlib import Path
from datetime import datetime

# Logging configuration: INFO level, emitted both to a log file next to the
# process and to the console (two handlers).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("kylin_optimizer.log"),
        logging.StreamHandler()
    ]
)
# Module-level logger, shared by the whole file.
logger = logging.getLogger(__name__)


class KylinAutoOptimizer:
    """Automates an optimization-test loop against a Kylin server over SSH.

    Workflow: connect via SSH/SFTP, upload a test script plus one or more
    optimization shell scripts, run each through an Ansible playbook inside a
    Docker sandbox on the remote host, wait for the container to finish,
    collect container logs and performance files, and summarize the results.
    """

    def __init__(self, host, username, private_key_path,
                 remote_workspace="/opt/kylin_optimizer",
                 local_result_dir="python/ansible-getsystem",
                 ansible_scripts_dir="agent-mcp/Ansible"):
        """
        Initialize the optimizer and open the SSH/SFTP connections.

        :param host: Kylin server IP address
        :param username: SSH user name
        :param private_key_path: path to the SSH private key file
        :param remote_workspace: working directory on the Kylin server
        :param local_result_dir: local directory for downloaded results
        :param ansible_scripts_dir: directory holding generated Ansible scripts
        """
        # FIX: honor the constructor arguments. The previous version ignored
        # host / username / private_key_path / remote_workspace and hard-coded
        # one developer's IP and Windows key path, silently breaking every
        # caller that passed different settings.
        self.host = host
        self.username = username
        self.private_key_path = private_key_path
        self.remote_workspace = remote_workspace
        self.local_result_dir = local_result_dir
        self.ansible_scripts_dir = ansible_scripts_dir
        self.ssh = None
        self.sftp = None
        self._connect()

        # Ensure the remote working directories exist.
        self._exec_command(f"mkdir -p {self.remote_workspace}/generated_scripts")
        self._exec_command(f"mkdir -p {self.remote_workspace}/results")

        # Ensure the local result directory exists.
        os.makedirs(self.local_result_dir, exist_ok=True)

    def _connect(self):
        """Establish the SSH and SFTP connections (re-raises on failure)."""
        try:
            self.ssh = paramiko.SSHClient()
            self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            private_key = paramiko.RSAKey.from_private_key_file(self.private_key_path)
            self.ssh.connect(
                hostname=self.host,
                username=self.username,
                pkey=private_key,
                timeout=30
            )
            self.sftp = self.ssh.open_sftp()
            logger.info(f"成功连接到服务器: {self.host}")
        except Exception as e:
            logger.error(f"SSH连接失败: {e}")
            raise

    def _exec_command(self, command, timeout=300):
        """Run a shell command on the remote server and collect its output.

        :param command: shell command line to execute
        :param timeout: per-command timeout in seconds
        :return: dict with 'success' plus 'stdout'/'stderr' (and 'exit_code'
                 on non-zero exit, or 'error' on transport-level exceptions)
        """
        try:
            logger.info(f"执行远程命令: {command}")
            stdin, stdout, stderr = self.ssh.exec_command(command, timeout=timeout)
            # recv_exit_status blocks until the remote command finishes.
            exit_status = stdout.channel.recv_exit_status()
            output = stdout.read().decode('utf-8').strip()
            error = stderr.read().decode('utf-8').strip()

            if exit_status != 0:
                logger.error(f"命令执行失败 (状态码 {exit_status}): {error}")
                return {
                    'success': False,
                    'exit_code': exit_status,
                    'stdout': output,
                    'stderr': error
                }

            return {
                'success': True,
                'stdout': output,
                'stderr': error
            }
        except Exception as e:
            logger.error(f"远程命令执行异常: {e}")
            return {
                'success': False,
                'error': str(e)
            }

    def _sftp_upload_string(self, content, remote_path):
        """Write a string to a remote file via SFTP; return True on success."""
        try:
            with self.sftp.file(remote_path, 'w') as remote_file:
                remote_file.write(content)
            logger.info(f"文件上传成功: {remote_path}")
            return True
        except Exception as e:
            logger.error(f"文件上传失败: {e}")
            return False

    def _sftp_download_file(self, remote_path, local_path):
        """Download a remote file to a local path; return True on success."""
        try:
            # FIX: only create the parent directory when there is one;
            # os.makedirs('') raises FileNotFoundError for bare filenames.
            local_dir = os.path.dirname(local_path)
            if local_dir:
                os.makedirs(local_dir, exist_ok=True)

            self.sftp.get(remote_path, local_path)
            logger.info(f"文件下载成功: {remote_path} -> {local_path}")
            return True
        except Exception as e:
            logger.error(f"文件下载失败: {e}")
            return False

    def _get_latest_ansible_scripts(self):
        """
        Collect every script in the newest dated folder under the Ansible dir.

        :return: list of script Paths sorted by file name ([] when none found)
        """
        try:
            # The Ansible directory may be addressed relative to several
            # bases depending on where the process was launched from.
            possible_paths = [
                Path(self.ansible_scripts_dir),  # plain relative path
                Path.cwd() / self.ansible_scripts_dir,  # cwd + relative path
                Path(__file__).parent.parent / self.ansible_scripts_dir,  # parent of this file's dir
            ]

            ansible_path = None
            for path in possible_paths:
                if path.exists():
                    ansible_path = path
                    logger.info(f"找到Ansible目录: {ansible_path}")
                    break

            if ansible_path is None:
                logger.error(f"Ansible目录不存在，尝试的路径: {[str(p) for p in possible_paths]}")
                return []

            # Dated sub-folders are all-digit names (format: YYYYMMDDHHMMSS).
            date_dirs = [d for d in ansible_path.iterdir() if d.is_dir() and d.name.isdigit()]

            if not date_dirs:
                logger.warning(f"在 {self.ansible_scripts_dir} 中未找到日期文件夹")
                return []

            # Lexicographic order matches chronological order for this
            # fixed-width timestamp format, so the max name is the newest.
            latest_date_dir = sorted(date_dirs, key=lambda x: x.name, reverse=True)[0]
            logger.info(f"找到最新日期目录: {latest_date_dir}")

            # All shell scripts in the newest folder.
            script_files = list(latest_date_dir.glob("*.sh"))

            if not script_files:
                logger.warning(f"在 {latest_date_dir} 中未找到.sh脚本文件")
                return []

            # Deterministic execution order: sort by file name.
            script_files.sort(key=lambda x: x.name)
            logger.info(f"找到 {len(script_files)} 个脚本文件: {[f.name for f in script_files]}")

            return script_files

        except Exception as e:
            logger.error(f"获取最新Ansible脚本失败: {e}")
            return []

    def _read_script_content(self, script_path):
        """
        Read a script file as UTF-8 text.

        :param script_path: Path of the script file
        :return: file content string, or None on failure
        """
        try:
            with open(script_path, 'r', encoding='utf-8') as f:
                content = f.read()
            logger.info(f"成功读取脚本: {script_path.name}")
            return content
        except Exception as e:
            logger.error(f"读取脚本失败 {script_path}: {e}")
            return None

    def _wait_for_container_completion(self, container_name, timeout=600):
        """Poll `docker inspect` until the container stops; True if it did."""
        start_time = time.time()
        while time.time() - start_time < timeout:
            # Query the container's lifecycle status.
            result = self._exec_command(f"docker inspect -f '{{{{.State.Status}}}}' {container_name}")

            if not result['success']:
                logger.warning(f"检查容器状态失败: {result.get('stderr', '')}")
                time.sleep(5)
                continue

            status = result['stdout'].lower()

            if status in ['exited', 'dead']:
                logger.info(f"容器已完成执行，状态: {status}")
                return True
            elif status in ['running', 'restarting']:
                logger.info(f"容器仍在运行，状态: {status}")
                time.sleep(10)
            else:
                logger.warning(f"未知容器状态: {status}")
                time.sleep(10)

        logger.error(f"等待容器完成超时 ({timeout}秒)")
        return False

    def upload_and_run_scripts(self, test_script, optimization_scripts=None, test_id=None):
        """
        Upload scripts and drive the automated test workflow.

        :param test_script: test script content (Python source, run remotely)
        :param optimization_scripts: list of optimization script dicts
            ({'name', 'content'}), a single dict, or a raw string; when None
            the newest Ansible scripts are picked up automatically
        :param test_id: test identifier used in result file names (generated
            from the current time when omitted)
        :return: summary result dict (or {'status': 'error', ...} on failure)
        """
        # Generate a test ID in YYYYMMDDHHMMSS form when none was given.
        if not test_id:
            test_id = datetime.now().strftime("%Y%m%d%H%M%S")
            logger.info(f"生成测试ID: {test_id}")

        # Result directory named after the current time.
        result_dir_name = datetime.now().strftime("%Y%m%d%H%M%S")
        remote_result_dir = f"{self.remote_workspace}/results/{result_dir_name}"
        # FIX: derive the local result path from the configured
        # self.local_result_dir instead of a hard-coded machine-specific
        # absolute path (E:\code\...), which broke on every other machine.
        local_result_dir = os.path.join(self.local_result_dir, result_dir_name)

        # Ensure both the remote and local result directories exist.
        self._exec_command(f"mkdir -p {remote_result_dir}")
        os.makedirs(local_result_dir, exist_ok=True)

        logger.info(f"创建结果目录: 远程={remote_result_dir}, 本地={local_result_dir}")

        # Without explicit scripts, pick up the newest generated Ansible ones.
        if optimization_scripts is None:
            script_files = self._get_latest_ansible_scripts()
            if not script_files:
                return {"status": "error", "message": "未找到可用的优化脚本"}

            optimization_scripts = []
            for script_file in script_files:
                script_content = self._read_script_content(script_file)
                if script_content:
                    optimization_scripts.append({
                        'name': script_file.name,
                        'content': script_content
                    })

            logger.info(f"自动获取到 {len(optimization_scripts)} 个优化脚本")

        # Normalize optimization_scripts to a list of {'name', 'content'} dicts.
        if isinstance(optimization_scripts, str):
            optimization_scripts = [{'name': 'optimization_script.sh', 'content': optimization_scripts}]
        elif isinstance(optimization_scripts, dict):
            optimization_scripts = [optimization_scripts]

        # Upload the test script to the remote server.
        test_script_path = f"{self.remote_workspace}/generated_scripts/{test_id}_test_script.py"
        if not self._sftp_upload_string(test_script, test_script_path):
            return {"status": "error", "message": "测试脚本上传失败"}

        # Upload every optimization script to the remote server.
        opt_script_paths = []
        for i, script_info in enumerate(optimization_scripts):
            script_name = script_info.get('name', f'optimization_script_{i + 1}.sh')
            script_content = script_info.get('content', '')

            opt_script_path = f"{self.remote_workspace}/generated_scripts/{test_id}_{script_name}"
            if not self._sftp_upload_string(script_content, opt_script_path):
                return {"status": "error", "message": f"优化脚本 {script_name} 上传失败"}

            opt_script_paths.append(opt_script_path)
            logger.info(f"上传优化脚本: {script_name}")

        # Run every optimization script, one Ansible invocation each.
        all_results = []
        for i, opt_script_path in enumerate(opt_script_paths):
            script_name = optimization_scripts[i].get('name', f'optimization_script_{i + 1}.sh')
            logger.info(f"执行优化脚本 {i + 1}/{len(opt_script_paths)}: {script_name}")

            # Invoke the Ansible playbook for this script.
            playbook_path = f"{self.remote_workspace}/playbook.yml"
            cmd = (
                f"cd {self.remote_workspace} && "
                f"ansible-playbook -i localhost, {playbook_path} "
                f"--extra-vars \"test_script_path={test_script_path} "
                f"optimization_script_path={opt_script_path} "
                f"test_id={test_id}_script_{i + 1}\""
            )
            ansible_result = self._exec_command(cmd, timeout=1200)  # 20-minute timeout

            if not ansible_result['success']:
                logger.error(f"脚本 {script_name} 执行失败")
                all_results.append({
                    "script_name": script_name,
                    "status": "error",
                    "message": "Ansible执行失败",
                    "details": ansible_result
                })
                continue

            # Wait for the sandbox container spawned by the playbook.
            # NOTE(review): the container name scheme must match the playbook —
            # confirm against playbook.yml.
            container_name = f"kylin-sandbox-{test_id}_script_{i + 1}"
            if not self._wait_for_container_completion(container_name):
                logger.error(f"脚本 {script_name} 容器执行超时")
                all_results.append({
                    "script_name": script_name,
                    "status": "error",
                    "message": "容器执行超时"
                })
                continue

            # Capture the container logs (best effort).
            logs_result = self._exec_command(f"docker logs {container_name}")
            if not logs_result['success']:
                container_logs = "无法获取容器日志"
            else:
                container_logs = logs_result['stdout']

            # Performance data files produced by the remote run.
            perf_files = {
                "before": f"{self.remote_workspace}/results/{test_id}_script_{i + 1}/perf_data/before_optimization.txt",
                "after": f"{self.remote_workspace}/results/{test_id}_script_{i + 1}/perf_data/after_optimization.txt"
            }

            # FIX: strip only the extension; str.replace('.sh', '') would also
            # mangle a '.sh' occurring in the middle of a name.
            script_name_without_ext = os.path.splitext(script_name)[0]

            for name, remote_path in perf_files.items():
                # File name pattern: <test_id>_<script>_<before|after>.txt
                local_filename = f"{test_id}_{script_name_without_ext}_{name}.txt"
                local_path = os.path.join(local_result_dir, local_filename)
                self._sftp_download_file(remote_path, local_path)

            # Extract structured performance metrics from the logs, if present.
            performance_metrics = {}
            if "===== 性能指标 =====" in container_logs:
                metrics_part = container_logs.split("===== 性能指标 =====")[1]
                try:
                    # Prefer a JSON object embedded after the marker.
                    metrics_json = re.search(r'\{.*\}', metrics_part, re.DOTALL)
                    if metrics_json:
                        performance_metrics = json.loads(metrics_json.group())
                except json.JSONDecodeError:
                    # Not JSON — keep the raw text instead.
                    performance_metrics = {"raw_metrics": metrics_part.strip()}

            # Per-script result record.
            script_result = {
                "script_name": script_name,
                "status": "completed",
                "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
                "container_logs": container_logs,
                "performance_metrics": performance_metrics,
                "ansible_output": ansible_result['stdout'],
                "local_perf_files": {
                    "before": os.path.join(local_result_dir, f"{test_id}_{script_name_without_ext}_before.txt"),
                    "after": os.path.join(local_result_dir, f"{test_id}_{script_name_without_ext}_after.txt")
                }
            }

            # Store the per-script result remotely as well.
            script_result_path = f"{self.remote_workspace}/results/{test_id}_script_{i + 1}_result.json"
            self._sftp_upload_string(json.dumps(script_result, indent=2), script_result_path)

            all_results.append(script_result)
            logger.info(f"脚本 {script_name} 执行完成")

        # Aggregate everything into the summary record.
        result_data = {
            "test_id": test_id,
            "result_directory": result_dir_name,
            "total_scripts": len(optimization_scripts),
            "completed_scripts": len([r for r in all_results if r.get('status') == 'completed']),
            "failed_scripts": len([r for r in all_results if r.get('status') == 'error']),
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            "script_results": all_results,
            "local_result_dir": local_result_dir,
            "remote_result_dir": remote_result_dir
        }

        # Persist the summary remotely...
        result_path = f"{remote_result_dir}/summary_result.json"
        self._sftp_upload_string(json.dumps(result_data, indent=2), result_path)

        # ...and keep a local copy too.
        local_summary_path = os.path.join(local_result_dir, "summary_result.json")
        with open(local_summary_path, 'w', encoding='utf-8') as f:
            json.dump(result_data, f, indent=2, ensure_ascii=False)

        logger.info(f"汇总结果已保存: 远程={result_path}, 本地={local_summary_path}")

        return result_data

    def format_result_for_llm(self, result_data):
        """
        Format a test result dict as a human/LLM-readable report string.

        :param result_data: summary dict from upload_and_run_scripts
        :return: formatted multi-line report string
        """
        if result_data.get('status') == 'error':
            return f"测试失败: {result_data.get('message', '未知错误')}"

        response = [
            f"测试ID: {result_data['test_id']}",
            f"结果目录: {result_data.get('result_directory', 'N/A')}",
            f"总脚本数: {result_data.get('total_scripts', 0)}",
            f"成功执行: {result_data.get('completed_scripts', 0)}",
            f"执行失败: {result_data.get('failed_scripts', 0)}",
            f"完成时间: {result_data['timestamp']}",
            f"本地结果目录: {result_data.get('local_result_dir', 'N/A')}",
            ""
        ]

        # One section per script result.
        script_results = result_data.get('script_results', [])
        if not script_results:
            response.append("未找到脚本执行结果")
            return "\n".join(response)

        for i, script_result in enumerate(script_results, 1):
            script_name = script_result.get('script_name', f'脚本{i}')
            status = script_result.get('status', 'unknown')

            response.append(f"===== 脚本 {i}: {script_name} =====")
            response.append(f"状态: {status}")

            if status == 'completed':
                response.append(f"完成时间: {script_result.get('timestamp', 'N/A')}")

                # Log excerpt: first 10 + last 10 lines when logs are long.
                logs = script_result.get('container_logs', '').splitlines()
                if len(logs) > 20:
                    response.extend(logs[:10])
                    response.append(f"... 省略 {len(logs) - 20} 行 ...")
                    response.extend(logs[-10:])
                else:
                    response.extend(logs)

                # Performance metrics, if any were parsed.
                performance_metrics = script_result.get('performance_metrics', {})
                if performance_metrics:
                    response.append("\n----- 性能指标 -----")
                    if isinstance(performance_metrics, dict):
                        for k, v in performance_metrics.items():
                            response.append(f"{k}: {v}")
                    else:
                        response.append(str(performance_metrics))
            else:
                response.append(f"错误信息: {script_result.get('message', '未知错误')}")

            response.append("")  # blank separator line

        # Closing prompt asking for analysis of the results.
        response.append("===== 分析建议 =====")
        response.append("请根据以上测试结果，分析各个优化脚本的效果并提供进一步的调优建议。")
        response.append("如果需要修改优化脚本，请生成新的优化脚本。")

        return "\n".join(response)

    def automated_optimization_test(self, test_script, optimization_scripts=None):
        """
        Fully automated optimization-test pipeline.

        :param test_script: test script content
        :param optimization_scripts: optimization script content; when None
            the newest generated scripts are used automatically
        :return: formatted result report string
        """
        logger.info("开始自动化优化测试流程")

        # Step 1: upload & run, using a timestamp test ID.
        test_id = datetime.now().strftime("%Y%m%d%H%M%S")
        logger.info(f"使用测试ID: {test_id}")
        result_data = self.upload_and_run_scripts(test_script, optimization_scripts, test_id)

        # Step 2: format the outcome for the caller.
        formatted_result = self.format_result_for_llm(result_data)

        logger.info("自动化测试流程完成")
        return formatted_result

    def close(self):
        """Close the SFTP and SSH connections."""
        if self.sftp:
            self.sftp.close()
        if self.ssh:
            self.ssh.close()
        logger.info("连接已关闭")


# LLM接口函数 - 供外部系统调用
def kylin_optimization_test(test_script, optimization_scripts=None):
    """
    Automated optimization-test entry point for LLM callers.

    :param test_script: test script generated by the LLM
    :param optimization_scripts: optimization scripts generated by the LLM;
        when None the newest generated scripts are fetched automatically
    :return: formatted test-result string (or an error message string)
    """
    # FIX: read connection settings from environment variables, falling back
    # to the previous literals — the original comment itself noted the values
    # should come from the environment or a config file.
    SERVER_IP = os.environ.get("KYLIN_SERVER_IP", "192.168.1.100")
    SSH_USER = os.environ.get("KYLIN_SSH_USER", "kylin_user")
    PRIVATE_KEY_PATH = os.environ.get("KYLIN_PRIVATE_KEY_PATH", "/path/to/private_key")
    LOCAL_RESULT_DIR = os.environ.get("KYLIN_LOCAL_RESULT_DIR", "python/ansible-getsystem")
    ANSIBLE_SCRIPTS_DIR = os.environ.get("KYLIN_ANSIBLE_SCRIPTS_DIR", "agent-mcp/Ansible")

    optimizer = None
    try:
        optimizer = KylinAutoOptimizer(
            host=SERVER_IP,
            username=SSH_USER,
            private_key_path=PRIVATE_KEY_PATH,
            local_result_dir=LOCAL_RESULT_DIR,
            ansible_scripts_dir=ANSIBLE_SCRIPTS_DIR
        )

        # Run the fully automated test pipeline.
        return optimizer.automated_optimization_test(test_script, optimization_scripts)

    except Exception as e:
        logger.error(f"自动化测试失败: {e}")
        return f"自动化测试失败: {str(e)}"

    finally:
        # Always release the SSH/SFTP connections, even on failure.
        if optimizer:
            optimizer.close()



# 使用示例
# if __name__ == "__main__":
#     test_script = """#!/usr/bin/env python3
# import time
# import json
# import psutil
# import sys
#
# print("开始性能测试...")
# start_time = time.time()
#
# # 模拟不同负载
# results = []
# for i in range(5):
#     print(f"测试迭代 {i+1}/5")
#     iter_start = time.time()
#
#     # CPU密集型任务
#     total = 0
#     for j in range(10**7):
#         total += j * 0.0001
#
#     # 内存分配
#     data = [0] * 10**6
#
#     # 获取资源使用情况
#     iter_time = time.time() - iter_start
#     cpu_percent = psutil.cpu_percent(interval=0.1)
#     mem_usage = psutil.Process().memory_info().rss / (1024 * 1024)  # MB
#
#     results.append({
#         "iter": i+1,
#         "time": iter_time,
#         "cpu": cpu_percent,
#         "memory": mem_usage
#     })
#     time.sleep(1)  # 间隔
#
# # 计算总体指标
# total_time = time.time() - start_time
# avg_time = sum(r['time'] for r in results) / len(results)
# avg_cpu = sum(r['cpu'] for r in results) / len(results)
# max_memory = max(r['memory'] for r in results)
#
# print("\\n===== 测试摘要 =====")
# print(f"总执行时间: {total_time:.2f}秒")
# print(f"平均迭代时间: {avg_time:.2f}秒")
# print(f"平均CPU使用率: {avg_cpu:.1f}%")
# print(f"最大内存使用: {max_memory:.2f}MB")
#
# # 结构化输出性能指标
# metrics = {
#     "total_time": total_time,
#     "avg_iter_time": avg_time,
#     "avg_cpu_usage": avg_cpu,
#     "max_memory_mb": max_memory,
#     "iterations": results
# }
#
# print("\\n===== 性能指标 =====")
# print(json.dumps(metrics, indent=2))
# sys.exit(0)
# """
#
#     # 调用自动化测试接口 - 自动获取最新脚本
#     print("开始自动化测试，将自动获取最新的Ansible脚本...")
#     result = kylin_optimization_test(test_script)  # 不传入optimization_scripts，自动获取
#
#     print("\n自动化测试结果:")
#     print(result)





def main_1():
    """Demo entry point: run the automated test with a synthetic workload."""
    # FIX: the embedded script must start at column 0. The previous version
    # indented the triple-quoted literal with the function body, so the
    # uploaded file began with leading whitespace and would fail remotely
    # with "IndentationError: unexpected indent" before measuring anything.
    test_script = """#!/usr/bin/env python3
import time
import json
import psutil
import sys

print("开始性能测试...")
start_time = time.time()

# 模拟不同负载
results = []
for i in range(5):
    print(f"测试迭代 {i+1}/5")
    iter_start = time.time()

    # CPU密集型任务
    total = 0
    for j in range(10**7):
        total += j * 0.0001

    # 内存分配
    data = [0] * 10**6

    # 获取资源使用情况
    iter_time = time.time() - iter_start
    cpu_percent = psutil.cpu_percent(interval=0.1)
    mem_usage = psutil.Process().memory_info().rss / (1024 * 1024)  # MB

    results.append({
        "iter": i+1,
        "time": iter_time,
        "cpu": cpu_percent,
        "memory": mem_usage
    })
    time.sleep(1)  # 间隔

# 计算总体指标
total_time = time.time() - start_time
avg_time = sum(r['time'] for r in results) / len(results)
avg_cpu = sum(r['cpu'] for r in results) / len(results)
max_memory = max(r['memory'] for r in results)

print("\\n===== 测试摘要 =====")
print(f"总执行时间: {total_time:.2f}秒")
print(f"平均迭代时间: {avg_time:.2f}秒")
print(f"平均CPU使用率: {avg_cpu:.1f}%")
print(f"最大内存使用: {max_memory:.2f}MB")

# 结构化输出性能指标
metrics = {
    "total_time": total_time,
    "avg_iter_time": avg_time,
    "avg_cpu_usage": avg_cpu,
    "max_memory_mb": max_memory,
    "iterations": results
}

print("\\n===== 性能指标 =====")
print(json.dumps(metrics, indent=2))
sys.exit(0)
"""

    # Run the automated test; optimization scripts are fetched automatically
    # because optimization_scripts is omitted.
    print("开始自动化测试，将自动获取最新的Ansible脚本...")
    result = kylin_optimization_test(test_script)

    print("\n自动化测试结果:")
    print(result)