# -*- coding: utf-8 -*-
# @author: HRUN

import os
import sys

# Make the backend package importable when this module is executed
# directly (not just as part of the Django project).
current_dir = os.path.dirname(os.path.abspath(__file__))
backend_dir = os.path.dirname(current_dir)
if backend_dir not in sys.path:
    sys.path.insert(0, backend_dir)

# Import the Django bootstrap helpers; on failure, print path diagnostics
# so a misconfigured sys.path is easy to spot, then re-raise.
try:
    from django_init_simple import setup_django, get_models
    from db_manager import safe_get_env_config
except ImportError as e:
    print(f"导入模块失败: {e}")
    print(f"当前Python路径: {sys.path}")
    print(f"当前目录: {current_dir}")
    print(f"后端目录: {backend_dir}")
    raise

# Initialise Django before any ORM model is touched below.
setup_django()

import time
import json
import logging
import threading
import subprocess
import requests
import paramiko
from typing import Dict, List, Optional, Any, Tuple
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed

from performanceengine.main import get_host_ip

# Logging configuration: attach a dedicated stream handler so this
# module's messages are emitted even when the host application has not
# configured logging itself.
logger = logging.getLogger(__name__)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)


class PortManager:
    """Remote port manager for the Locust web UI.

    All checks run on the remote load server through the injected SSH
    client (anything exposing paramiko-style ``exec_command``); the
    candidate ports are 8100-8150.
    """

    def __init__(self, ssh_client):
        # ssh_client: a paramiko.SSHClient (or compatible) already
        # connected to the target server.
        self.ssh = ssh_client
        self.port_range = range(8100, 8151)  # 8100-8150 inclusive

    def cleanup_ports(self, master_only: bool = True) -> bool:
        """Port-cleanup hook — intentionally disabled.

        Killing processes to free ports interfered with distributed runs
        (worker processes could be killed as collateral), so this is now
        a no-op that always reports success. The dead implementation that
        used to live here (pkill/lsof/kill -9 over SSH) has been removed;
        recover it from version control if it is ever needed again.

        Args:
            master_only: kept for interface compatibility; ignored while
                the feature is disabled.

        Returns:
            Always True.
        """
        logger.info("端口清理功能已禁用")
        return True

    def find_available_port(self, preferred_port: Optional[int] = None) -> Optional[int]:
        """Find a usable web port on the remote host.

        Each candidate is checked with ``netstat`` first and ``lsof``
        second. Ports held only by docker-proxy are treated as usable,
        since docker-proxy does not prevent another process from binding.
        As a last resort, port 8100 is forcibly freed and retried.

        Args:
            preferred_port: if given and inside the managed range, it is
                tried first (without being scanned twice).

        Returns:
            The chosen port number, or None when nothing is usable.
        """
        try:
            import random

            # Random start-up jitter to reduce races between concurrent
            # managers probing the same host.
            time.sleep(random.uniform(0.5, 1.5))

            port_range = list(self.port_range)

            # Move the preferred port to the front of the scan order
            # (remove first so it is not probed twice).
            if preferred_port and preferred_port in port_range:
                port_range.remove(preferred_port)
                port_range.insert(0, preferred_port)

            logger.info(f"开始检查端口可用性，端口范围: {port_range[:5]}...{port_range[-5:]}")

            for port in port_range:
                try:
                    # First pass: is anything listening according to netstat?
                    stdin, stdout, stderr = self.ssh.exec_command(f'netstat -tlnp | grep :{port}', timeout=10)
                    netstat_result = stdout.read().decode().strip()

                    if not netstat_result:
                        # Nothing listening — the port is considered free.
                        logger.info(f"找到可用端口: {port}")
                        return port

                    if "docker-proxy" in netstat_result:
                        # docker-proxy holds the port but usually does not
                        # block another bind, so accept the port anyway.
                        logger.warning(f"端口 {port} 被docker-proxy占用: {netstat_result}")
                        logger.info(f"尝试使用docker-proxy占用的端口: {port}")
                        return port

                    logger.warning(f"端口 {port} 被其他进程占用: {netstat_result}")

                    # Second pass: confirm with lsof and inspect the owner.
                    stdin, stdout, stderr = self.ssh.exec_command(f'lsof -ti :{port}', timeout=10)
                    occupied_pids = stdout.read().decode().strip()

                    if occupied_pids:
                        try:
                            stdin, stdout, stderr = self.ssh.exec_command(f'ps -p {occupied_pids} -o pid,ppid,cmd --no-headers', timeout=10)
                            process_info = stdout.read().decode().strip()
                            logger.info(f"端口 {port} 被进程占用: {process_info}")

                            # Same docker-proxy exception as above.
                            if "docker-proxy" in process_info:
                                logger.info(f"端口 {port} 被docker-proxy占用，尝试使用: {process_info}")
                                return port
                        except Exception as e:
                            logger.warning(f"获取进程信息失败: {e}")
                            logger.info(f"端口 {port} 被进程占用: {occupied_pids}")
                    else:
                        # netstat saw the port but lsof did not — likely a
                        # false positive, so try the port anyway.
                        logger.info(f"端口 {port} lsof未检测到占用，但netstat检测到，尝试使用")
                        return port

                except Exception as e:
                    logger.warning(f"检查端口 {port} 占用情况失败: {e}")
                    continue

            # Nothing free: forcibly clean port 8100 and retry once.
            logger.warning("端口冲突，清理端口 8100")
            try:
                self.kill_process_using_port(8100)
                # Give the killed process time to fully exit.
                time.sleep(3)

                if self.is_port_available(8100):
                    logger.info("清理后端口 8100 可用")
                    return 8100
                logger.error("清理后端口 8100 仍不可用")
            except Exception as e:
                logger.error(f"端口清理失败: {e}")

            logger.error("所有端口都被占用")
            return None

        except Exception as e:
            logger.error(f"检查端口可用性失败: {e}")
            return None

    def is_port_available(self, port: int) -> bool:
        """Return True when *port* looks free on the remote host.

        lsof is checked first; netstat serves as a second opinion. When
        the netstat check itself fails, the lsof verdict stands.
        """
        try:
            stdin, stdout, stderr = self.ssh.exec_command(f'lsof -ti :{port}', timeout=10)
            occupied_pids = stdout.read().decode().strip()

            if occupied_pids:
                return False

            # Double-check with netstat before declaring the port free.
            try:
                stdin, stdout, stderr = self.ssh.exec_command(f'netstat -tlnp | grep :{port}', timeout=10)
                netstat_result = stdout.read().decode().strip()

                return not netstat_result
            except Exception as e:
                logger.warning(f"检查端口 {port} netstat状态失败: {e}")
                # lsof said the port is free; trust it despite the
                # failed netstat check.
                return True

        except Exception as e:
            logger.error(f"检查端口 {port} 可用性失败: {e}")
            return False

    def kill_process_using_port(self, port: int) -> bool:
        """Force-kill every process bound to *port* on the remote host.

        Returns:
            True when the port is free afterwards (or was already free),
            False otherwise.
        """
        try:
            logger.info(f"开始清理端口 {port} 的占用进程")

            # Find the PIDs currently bound to the port.
            stdin, stdout, stderr = self.ssh.exec_command(f'lsof -ti :{port}', timeout=10)
            occupied_pids = stdout.read().decode().strip()

            if not occupied_pids:
                logger.info(f"端口 {port} 没有被进程占用")
                return True

            for raw_pid in occupied_pids.split('\n'):
                pid = raw_pid.strip()
                if not pid:
                    continue
                try:
                    # Record what is being killed for post-mortem debugging.
                    stdin, stdout, stderr = self.ssh.exec_command(f'ps -p {pid} -o pid,ppid,cmd --no-headers', timeout=10)
                    process_info = stdout.read().decode().strip()
                    logger.info(f"准备杀掉进程: PID {pid}, 信息: {process_info}")

                    self.ssh.exec_command(f'kill -9 {pid}', timeout=10)
                    logger.info(f"已杀掉进程: PID {pid}")

                except Exception as e:
                    logger.warning(f"杀掉进程 {pid} 失败: {e}")

            # Allow the killed processes to fully exit before re-checking.
            time.sleep(2)

            if self.is_port_available(port):
                logger.info(f"端口 {port} 已成功释放")
                return True
            logger.warning(f"端口 {port} 可能仍被占用")
            return False

        except Exception as e:
            logger.error(f"清理端口 {port} 失败: {e}")
            return False


class DistributedTestManager:
    """分布式测试管理器"""
    
    def __init__(self, task_id: int = 0):
        """初始化分布式测试管理器"""
        self.task_id = task_id
        self.master_process = None
        self.worker_processes = {}
        self.ssh_clients = {}
        self.master_server = None
        self.worker_servers = []
        self.test_running = False
        self.results = {}
        self.web_port = 8100  # 默认端口，可以通过配置修改
        self.port_manager = None  # 端口管理器
        
        # 获取任务信息
        if task_id:
            self._get_task_info()
            
    def setup_distributed_test(self) -> bool:
        """Set up the distributed test environment.

        Starts the master node synchronously and only returns True once
        its Locust web UI answers HTTP 200. Worker-side work (connection
        checks, file distribution) is kicked off asynchronously by the
        helper methods this calls.

        Returns:
            True when the master web service is reachable; False on any
            failure along the way.
        """
        try:
            logger.info(f"开始设置分布式测试环境 - 任务ID: {self.task_id}")

            # Step 1: load and validate the task configuration.
            logger.info("步骤1: 开始获取任务信息...")
            task_info = self._get_task_info()
            if not task_info:
                logger.error(f"无法获取任务信息: {self.task_id}")
                return False

            self.master_server = task_info['master_server']
            self.worker_servers = task_info['worker_servers']

            logger.info(f"步骤1完成: 任务配置验证通过 - 主服务器: {self.master_server.host_ip}, 工作服务器: {len(self.worker_servers)}个")

            # Step 2: SSH reachability. Only the master must succeed here;
            # workers are probed on a background thread inside the helper.
            logger.info("步骤2: 开始检查服务器连接...")
            if not self._check_server_connections():
                logger.error("步骤2失败: 服务器连接检查失败")
                return False
            logger.info("步骤2完成: 服务器连接检查成功")

            # Step 3: prepare test data before distributing files.
            logger.info("步骤3: 开始准备测试数据...")
            try:
                from performanceengine.main import run_task
                # Environment id and web port come from instance attributes
                # when present, with safe defaults otherwise.
                env_id = getattr(self, 'env_id', 1)  # default environment id
                web_port = getattr(self, 'web_port', 8100)  # default web port
                logger.info(f"使用Web端口: {web_port}")
                executor = 'system'

                # Bound run_task with a cross-platform timeout: signal-based
                # alarms are POSIX-only, so a daemon thread + join is used.
                import platform
                import threading
                import time

                test_data = None
                timeout_occurred = False

                def run_task_with_timeout():
                    # Runs in a worker thread; reports back via nonlocals.
                    nonlocal test_data, timeout_occurred
                    try:
                        test_data = run_task(self.task_id, env_id, executor, web_port, create_report=False)
                    except Exception as e:
                        logger.error(f"run_task执行异常: {e}")
                        timeout_occurred = True

                task_thread = threading.Thread(target=run_task_with_timeout)
                task_thread.daemon = True
                task_thread.start()

                # Wait at most 5 seconds for data preparation to finish.
                task_thread.join(timeout=5)

                if task_thread.is_alive():
                    # Timed out: proceed anyway and let the daemon thread
                    # finish preparation in the background.
                    logger.warning("步骤3警告: 测试数据准备超时，但继续执行")
                elif timeout_occurred:
                    logger.error("步骤3失败: run_task执行异常")
                    return False
                else:
                    logger.info("步骤3完成: 测试数据准备完成")

            except Exception as e:
                logger.error(f"步骤3失败: 准备测试数据失败: {e}")
                return False

            # Step 4: push locustfile/data/config to the servers (master
            # synchronously, workers asynchronously inside the helper).
            logger.info("步骤4: 开始分发测试文件...")
            if not self._distribute_test_files():
                logger.error("步骤4失败: 测试文件分发失败")
                return False
            logger.info("步骤4完成: 测试文件分发成功")

            # Step 5: start the master and poll its web UI until ready.
            logger.info("步骤5: 启动主节点并等待Web服务ready...")
            if not self._start_master_node():
                logger.error("步骤5失败: 主节点启动失败")
                return False
            # Poll the master web UI: up to 15 attempts, 2 seconds apart.
            master_url = f"http://{self.master_server.host_ip}:{self.web_port}"
            max_retries = 15
            for i in range(max_retries):
                try:
                    import requests
                    response = requests.get(f"{master_url}/", timeout=5)
                    if response.status_code == 200:
                        logger.info(f"主节点Web服务已启动: {master_url}")
                        break
                except Exception as e:
                    logger.info(f"等待主节点Web服务启动... ({i+1}/{max_retries}) - {e}")
                import time
                time.sleep(2)
            else:
                # for/else: the loop exhausted all retries without a break,
                # i.e. the web service never answered 200.
                logger.error(f"主节点Web服务启动超时: {master_url}")
                return False
            logger.info("步骤5完成: 主节点Web服务ready")
            logger.info(f"分布式测试环境设置成功 - 任务ID: {self.task_id}")
            return True
        except Exception as e:
            logger.error(f"设置分布式测试环境失败: {e}")
            return False
    
    def _get_task_info(self) -> Optional[Dict]:
        """Load and validate the distributed-test configuration from the DB.

        Returns:
            A dict with keys 'master_server', 'worker_servers',
            'distributed_mode', 'worker_distribution' and 'total_workers',
            or None when the task is missing or misconfigured (wrong mode,
            no master, no workers).
        """
        try:
            # Run the ORM query through the thread-safe wrapper, since this
            # manager may be driven from worker threads.
            from db_manager import thread_safe_db_operation

            @thread_safe_db_operation
            def _get_task():
                # Resolve Django models lazily (only valid after setup_django()).
                models = get_models()
                PerformanceTask = models['PerformanceTask']

                task = PerformanceTask.objects.get(id=self.task_id)
                return task

            task = _get_task()

            # Validate the distributed configuration before using it.
            if task.distributed_mode != 'distributed':
                logger.error(f"任务 {self.task_id} 不是分布式模式，当前模式: {task.distributed_mode}")
                return None

            if not task.master_server:
                logger.error(f"任务 {self.task_id} 未配置主服务器")
                return None

            worker_servers = list(task.worker_servers.all())
            if not worker_servers:
                logger.error(f"任务 {self.task_id} 未配置工作服务器")
                return None

            logger.info(f"获取任务信息成功 - 主服务器: {task.master_server.host_ip}, 工作服务器数量: {len(worker_servers)}")

            return {
                'master_server': task.master_server,
                'worker_servers': worker_servers,
                'distributed_mode': task.distributed_mode,
                # getattr defaults: these fields may be absent on older task rows.
                'worker_distribution': getattr(task, 'worker_distribution', {}),
                'total_workers': getattr(task, 'total_workers', len(worker_servers))
            }

        except Exception as e:
            logger.error(f"获取任务信息失败: {e}")
            return None
    
    def _check_server_connections(self) -> bool:
        """Verify SSH reachability of the configured servers.

        Only the master server is checked synchronously (and must
        succeed); worker servers are probed on a background daemon
        thread so setup is never blocked by a slow or dead worker.
        """
        try:
            # Master connectivity is mandatory.
            master_ok = self._test_ssh_connection(self.master_server)
            if not master_ok:
                logger.error(f"主服务器连接失败: {self.master_server.host_ip}")
                return False

            def _probe_workers():
                # Best-effort worker probing; failures are only logged.
                try:
                    logger.info("开始异步检查工作服务器连接...")
                    for srv in self.worker_servers:
                        if self._test_ssh_connection(srv):
                            logger.info(f"工作服务器连接成功: {srv.host_ip}")
                        else:
                            logger.warning(f"工作服务器连接失败: {srv.host_ip}")
                except Exception as e:
                    logger.error(f"异步检查工作服务器连接失败: {e}")

            import threading
            probe_thread = threading.Thread(target=_probe_workers, daemon=True)
            probe_thread.start()

            return True

        except Exception as e:
            logger.error(f"服务器连接检查失败: {e}")
            return False
    
    def _test_ssh_connection(self, server) -> bool:
        """Open and verify an SSH connection to *server*.

        On success the live client is cached in ``self.ssh_clients``
        keyed by ``server.id`` for later reuse (file upload, port checks).

        Returns:
            True when a trivial remote command round-trips correctly;
            False on any configuration, decryption or connection failure.
        """
        try:
            logger.info(f"开始测试SSH连接: {server.host_ip}")

            # Validate the server record before attempting a connection.
            if not server.host_ip:
                logger.error(f"服务器 {server.name} 未配置IP地址")
                return False

            if not server.sys_user_name:
                logger.error(f"服务器 {server.name} 未配置用户名")
                return False

            if not server.sys_user_passwd:
                logger.error(f"服务器 {server.name} 未配置密码")
                return False

            # The stored password is encrypted at rest; decrypt it first.
            try:
                from common.encryption_and_decryption import decrypt_field, load_key
                key = load_key()
                decrypted_password = decrypt_field(server.sys_user_passwd, key)
                logger.info(f"密码解密成功")
            except Exception as e:
                logger.error(f"密码解密失败: {e}")
                return False

            ssh = paramiko.SSHClient()
            # Auto-accept unknown host keys — assumes a trusted internal
            # network; revisit if servers are ever exposed externally.
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

            # Short timeout keeps setup snappy when a host is unreachable.
            connect_params = {
                'hostname': server.host_ip,
                'port': server.host_port or 22,
                'username': server.sys_user_name,
                'password': decrypted_password,
                'timeout': 5  # seconds
            }

            logger.info(f"尝试连接 {server.host_ip}:{connect_params['port']} 用户: {server.sys_user_name}")

            ssh.connect(**connect_params)

            # Round-trip a trivial command to prove the session works.
            stdin, stdout, stderr = ssh.exec_command('echo "connection test"', timeout=3)
            result = stdout.read().decode().strip()

            if result == "connection test":
                self.ssh_clients[server.id] = ssh
                logger.info(f"SSH连接成功: {server.host_ip}")
                return True
            else:
                ssh.close()
                logger.error(f"SSH连接测试失败: 命令执行结果不正确")
                return False

        except paramiko.AuthenticationException as e:
            logger.error(f"SSH认证失败 {server.host_ip}: {e}")
            return False
        except paramiko.SSHException as e:
            logger.error(f"SSH连接异常 {server.host_ip}: {e}")
            return False
        except Exception as e:
            logger.error(f"SSH连接测试失败 {server.host_ip}: {e}")
            return False
    
    def _distribute_test_files(self) -> bool:
        """Push the locustfile, data and config to the test servers.

        The master upload is synchronous and must succeed; worker
        uploads happen on a background daemon thread, with failures
        only logged.
        """
        try:
            # The generated locustfile is mandatory.
            script = self._generate_test_script()
            if not script:
                return False

            # The config file is optional — a missing one just means the
            # nodes fall back to Locust defaults.
            conf = self._get_locust_config()
            if not conf:
                logger.warning("无法获取locust.conf配置文件，将使用默认配置")

            if not self._upload_test_file(self.master_server, script, conf):
                logger.error(f"主服务器文件分发失败: {self.master_server.host_ip}")
                return False

            def _push_to_workers():
                try:
                    logger.info("开始异步分发文件到工作服务器...")
                    for srv in self.worker_servers:
                        try:
                            uploaded = self._upload_test_file(srv, script, conf)
                        except Exception as e:
                            logger.warning(f"工作服务器文件分发异常: {srv.host_ip} - {e}")
                            continue
                        if uploaded:
                            logger.info(f"工作服务器文件分发成功: {srv.host_ip}")
                        else:
                            logger.warning(f"工作服务器文件分发失败: {srv.host_ip}")
                except Exception as e:
                    logger.error(f"异步分发文件到工作服务器失败: {e}")

            import threading
            push_thread = threading.Thread(target=_push_to_workers, daemon=True)
            push_thread.start()

            return True

        except Exception as e:
            logger.error(f"测试文件分发失败: {e}")
            return False
    
    def _get_locust_config(self) -> Optional[str]:
        """Read locust.conf from beside this module.

        When ``self.duration`` is set (seconds), any ``run-time`` entry
        in the file is rewritten to match it.

        Returns:
            The (possibly modified) file content, or None when the file
            is missing or unreadable.
        """
        try:
            import os
            config_path = os.path.join(os.path.dirname(__file__), 'locust.conf')
            if not os.path.exists(config_path):
                logger.warning(f"配置文件不存在: {config_path}")
                return None

            with open(config_path, 'r', encoding='utf-8') as f:
                raw_content = f.read()

            # Rewrite run-time only when a duration override is present.
            duration = getattr(self, 'duration', None)
            rewritten = []
            for line in raw_content.split('\n'):
                if duration and line.strip().startswith('run-time'):
                    rewritten.append(f"run-time = {duration}s")
                    logger.info(f"修改配置文件中的run-time参数: {duration}s")
                else:
                    rewritten.append(line)

            return '\n'.join(rewritten)
        except Exception as e:
            logger.error(f"读取配置文件失败: {e}")
            return None

    def _generate_test_script(self) -> Optional[str]:
        """Build the locustfile.py source pushed to every node.

        The returned script is self-contained: at runtime on the remote
        host it loads scenes from a sibling data.json and replays each
        scene's api/wait steps through Locust's HttpUser. The literal
        below is runtime code for the remote hosts — it is uploaded
        verbatim, so do not edit it as if it were local code.

        Returns:
            The script source, or None if assembling it fails.
        """
        try:
            script_content = '''
import json
import logging
from locust import HttpUser, task, between, events
import time

# 设置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 测试数据
try:
    with open("data.json", "r", encoding="utf-8") as f:
        TEST_DATA = json.load(f)
    logger.info("成功加载测试数据")
except Exception as e:
    logger.error(f"加载测试数据失败: {e}")
    TEST_DATA = {"scenes": [], "presetting": {}}

class PerformanceUser(HttpUser):
    wait_time = between(1, 3)
    
    def on_start(self):
        logger.info("用户开始")
        self.scenes = TEST_DATA.get('scenes', [])
        self.presetting = TEST_DATA.get('presetting', {})
        logger.info(f"加载了 {len(self.scenes)} 个场景")
        
    @task(1)
    def execute_test_scenarios(self):
        logger.info("开始执行测试场景")
        for scene in self.scenes:
            self._execute_scene(scene)
    
    def _execute_scene(self, scene):
        scene_name = scene.get('name', '未命名场景')
        logger.info(f"执行场景: {scene_name}")
        steps = scene.get('steps', [])
        for step in steps:
            self._execute_step(step)
    
    def _execute_step(self, step):
        step_type = step.get('type', 'unknown')
        logger.info(f"执行步骤类型: {step_type}")
        
        if step_type == 'api':
            self._execute_api_step(step)
        elif step_type == 'wait':
            wait_time = step.get('waitTime', 1)
            logger.info(f"等待 {wait_time} 秒")
            time.sleep(wait_time)
        else:
            logger.warning(f"未知步骤类型: {step_type}")
    
    def _execute_api_step(self, step):
        try:
            content = step.get('content', {})
            method = content.get('method', 'GET').upper()
            url = content.get('url', '')
            headers = content.get('headers', {})
            request_data = content.get('request', {})
            
            logger.info(f"执行API请求: {method} {url}")
            
            if method == 'GET':
                response = self.client.get(url, headers=headers, params=request_data.get('params', {}))
            elif method == 'POST':
                response = self.client.post(url, headers=headers, json=request_data.get('json', {}))
            elif method == 'PUT':
                response = self.client.put(url, headers=headers, json=request_data.get('json', {}))
            elif method == 'DELETE':
                response = self.client.delete(url, headers=headers)
            else:
                logger.warning(f"不支持的HTTP方法: {method}")
                return
                
            logger.info(f"API请求完成: {response.status_code}")
                
        except Exception as e:
            logger.error(f"API请求失败: {e}")

# 事件监听器
@events.test_start.add_listener
def on_test_start(environment, **kwargs):
    logger.info("分布式测试开始")

@events.test_stop.add_listener
def on_test_stop(environment, **kwargs):
    logger.info("分布式测试结束")

@events.request.add_listener
def on_request(request_type, name, response_time, response_length, response, context, exception, start_time, url, **kwargs):
    if exception:
        logger.error(f"请求失败: {name} - {exception}")
    else:
        logger.info(f"请求成功: {name} - {response_time}ms")
'''
            return script_content
        except Exception as e:
            logger.error(f"生成测试脚本失败: {e}")
            return None

    def _upload_test_file(self, server, script_content: str, config_content: str = None) -> bool:
        """上传测试文件（包括locustfile.py、data.json和locust.conf）"""
        try:
            ssh = self.ssh_clients.get(server.id)
            if not ssh:
                return False
            # 创建测试目录
            stdin, stdout, stderr = ssh.exec_command('mkdir -p /tmp/locust_test')
            # 上传测试脚本
            sftp = ssh.open_sftp()
            with sftp.open('/tmp/locust_test/locustfile.py', 'w') as f:
                f.write(script_content)
            # 上传数据文件
            from performanceengine.params import get_data_file_path
            local_data_path = get_data_file_path()
            with open(local_data_path, 'r', encoding='utf-8') as local_data:
                with sftp.open('/tmp/locust_test/data.json', 'w') as remote_data:
                    remote_data.write(local_data.read())
            # 上传配置文件（如果存在）
            if config_content:
                with sftp.open('/tmp/locust_test/locust.conf', 'w') as f:
                    f.write(config_content)
                logger.info(f"配置文件上传成功: {server.host_ip}")
            sftp.close()
            return True
        except Exception as e:
            logger.error(f"上传测试文件失败 {server.host_ip}: {e}")
            return False
    
    def _start_master_node(self) -> bool:
        """Start the master node, retrying up to three times.

        Each attempt delegates to ``_attempt_start_master``; between failed
        attempts the method waits five seconds before retrying.

        Returns:
            bool: True as soon as one attempt succeeds, False once all
            retries are exhausted.
        """
        max_retries = 3
        for attempt in range(1, max_retries + 1):
            try:
                logger.info(f"尝试启动主节点 (第{attempt}次)")
                if self._attempt_start_master():
                    return True
                # Attempt failed without raising: retry or give up.
                if attempt >= max_retries:
                    logger.error("主节点启动失败，已达到最大重试次数")
                    return False
                logger.warning(f"主节点启动失败，等待5秒后重试...")  # 减少等待时间
                time.sleep(5)  # 减少到5秒
                # 注释掉端口清理，避免影响测试
                # if self.port_manager:
                #     self.port_manager.cleanup_ports(master_only=True)
            except Exception as e:
                logger.error(f"启动主节点异常 (第{attempt}次): {e}")
                if attempt >= max_retries:
                    return False
                time.sleep(5)  # 减少到5秒
        return False

    def _attempt_start_master(self) -> bool:
        """Single attempt to start the Locust master node on the remote host.

        Sequence: verify the SSH connection and remote Locust install, log
        environment diagnostics, stop stale master processes (workers are
        left untouched), pick an available web port, launch locust in master
        mode in the background, collect startup diagnostics, then confirm
        via pgrep/ps/netstat that the process or its ports are alive.

        Returns:
            bool: True when a running master process (or a listening
            master/web port) is detected after launch, False otherwise.
        """
        try:
            ssh = self.ssh_clients.get(self.master_server.id)
            if not ssh:
                logger.error("无法获取SSH连接")
                return False

            # Create the port manager lazily on first use.
            if not self.port_manager:
                self.port_manager = PortManager(ssh)

            logger.info(f"开始启动主节点: {self.master_server.host_ip}")

            # Locust must be installed on the remote server; abort otherwise.
            try:
                stdin, stdout, stderr = ssh.exec_command('which locust', timeout=10)
                locust_path = stdout.read().decode().strip()
                if not locust_path:
                    logger.error(f"远程服务器 {self.master_server.host_ip} 未安装Locust")
                    logger.error("请在机器管理中测试连接后，通过弹窗安装Locust依赖")
                    logger.error("安装命令: pip install locust==2.32.1")
                    return False
            except Exception as e:
                logger.error(f"检查Locust安装失败: {e}")
                return False

            # Best-effort diagnostics: versions and test directory listing.
            # Failures here only warn and never abort the attempt.
            try:
                stdin, stdout, stderr = ssh.exec_command('locust --version', timeout=10)
                version_output = stdout.read().decode().strip()
                logger.info(f"Locust版本: {version_output}")
            except Exception as e:
                logger.warning(f"检查Locust版本失败: {e}")

            try:
                stdin, stdout, stderr = ssh.exec_command('python3 --version', timeout=10)
                python_version = stdout.read().decode().strip()
                logger.info(f"Python版本: {python_version}")
            except Exception as e:
                logger.warning(f"检查Python版本失败: {e}")

            logger.info(f"Locust路径: {locust_path}")

            try:
                stdin, stdout, stderr = ssh.exec_command('ls -la /tmp/locust_test/', timeout=10)
                file_list = stdout.read().decode().strip()
                logger.info(f"测试目录内容: {file_list}")
            except Exception as e:
                logger.warning(f"检查测试文件失败: {e}")

            # Kill only stale *master* processes so running workers survive.
            logger.info("停止可能存在的Locust主节点进程...")
            try:
                ssh.exec_command('pkill -f "locust.*master"', timeout=10)
                time.sleep(3)  # give the old process time to exit fully
            except Exception as e:
                logger.warning(f"停止主节点进程失败: {e}")

            # Reserve an available port for the Locust web UI.
            web_port = self.port_manager.find_available_port(self.web_port)
            if not web_port:
                logger.error("无法找到可用的Web端口")
                return False
            self.web_port = web_port
            logger.info(f"使用Web端口: {web_port}")

            master_port = self.master_server.locust_master_port or 5557
            logger.info(f"启动主节点 - 主端口: {master_port}, Web端口: {web_port}")

            # Launch WITHOUT --autostart: the swarm is triggered manually
            # after the workers have registered with the master.
            master_cmd = (
                f"cd /tmp/locust_test && "
                f"nohup {locust_path} -f locustfile.py "
                f"--master --master-bind-host=0.0.0.0 --master-bind-port={master_port} "
                f"--web-host=0.0.0.0 --web-port={web_port} "
                f"--config=locust.conf > master.log 2>&1 &"
            )
            logger.info(f"执行主节点启动命令: {master_cmd}")

            try:
                stdin, stdout, stderr = ssh.exec_command(master_cmd, timeout=30)
                # Wait for the launching shell (not the daemonized locust
                # process) to finish and report its status.
                exit_status = stdout.channel.recv_exit_status()
                if exit_status != 0:
                    err = stderr.read().decode().strip()
                    logger.error(f"主节点启动命令失败: {err}")
                    return False
                logger.info("主节点启动命令执行成功")
            except Exception as e:
                logger.error(f"执行主节点启动命令异常: {e}")
                return False

            # BUGFIX: everything from here to the liveness check used to sit
            # inside the except block above, *after* its `return False`, and
            # therefore never executed. It now runs on the success path.
            time.sleep(8)  # let the daemonized process come up before probing

            try:
                stdin, stdout, stderr = ssh.exec_command('pgrep -f "locust.*master"', timeout=10)
                pid = stdout.read().decode().strip()
                logger.info(f"主节点进程PID: {pid}")
            except Exception as e:
                logger.warning(f"获取主节点PID失败: {e}")

            # Surface the tail of the startup log for troubleshooting.
            try:
                stdin, stdout, stderr = ssh.exec_command('tail -20 /tmp/locust_test/master.log', timeout=10)
                log_content = stdout.read().decode().strip()
                logger.info(f"主节点启动日志: {log_content}")
            except Exception as e:
                logger.warning(f"读取主节点日志失败: {e}")

            # nohup.out may legitimately not exist; stay silent on failure.
            try:
                stdin, stdout, stderr = ssh.exec_command('tail -5 /tmp/locust_test/nohup.out', timeout=10)
                nohup_content = stdout.read().decode().strip()
                if nohup_content:
                    logger.info(f"主节点nohup输出: {nohup_content}")
            except Exception:
                pass

            try:
                stdin, stdout, stderr = ssh.exec_command('grep -i error /tmp/locust_test/master.log | tail -5', timeout=10)
                error_log = stdout.read().decode().strip()
                if error_log:
                    logger.error(f"主节点错误日志: {error_log}")
            except Exception as e:
                logger.warning(f"检查错误日志失败: {e}")

            try:
                stdin, stdout, stderr = ssh.exec_command('cat /tmp/locust_test/locust.conf', timeout=10)
                config_content = stdout.read().decode().strip()
                logger.info(f"Locust配置文件内容: {config_content}")
            except Exception as e:
                logger.warning(f"读取配置文件失败: {e}")

            try:
                stdin, stdout, stderr = ssh.exec_command('ls -la /tmp/locust_test/locustfile.py', timeout=10)
                file_info = stdout.read().decode().strip()
                logger.info(f"Locust文件信息: {file_info}")
            except Exception as e:
                logger.warning(f"检查locustfile.py失败: {e}")

            # Liveness check: the node counts as started when the process is
            # visible to pgrep/ps OR either of its ports is being listened on.
            try:
                stdin, stdout, stderr = ssh.exec_command('pgrep -f "locust.*master"', timeout=10)
                running_pid = stdout.read().decode().strip()
                logger.info(f"运行中的Locust主进程PID (pgrep): {running_pid}")

                stdin, stdout, stderr = ssh.exec_command('ps aux | grep "locust.*master" | grep -v grep', timeout=10)
                ps_output = stdout.read().decode().strip()
                logger.info(f"运行中的Locust主进程 (ps): {ps_output}")

                stdin, stdout, stderr = ssh.exec_command(f'netstat -tlnp | grep :{master_port}', timeout=10)
                port_output = stdout.read().decode().strip()
                logger.info(f"主端口监听状态: {port_output}")

                stdin, stdout, stderr = ssh.exec_command(f'netstat -tlnp | grep :{web_port}', timeout=10)
                web_port_output = stdout.read().decode().strip()
                logger.info(f"Web端口监听状态: {web_port_output}")

                if running_pid or ps_output or port_output or web_port_output:
                    logger.info(f"主节点进程已启动: PID {running_pid}")
                    return True

                logger.warning("未检测到运行中的主进程")
                # Dig for errors in the master log to explain the failure.
                try:
                    stdin, stdout, stderr = ssh.exec_command('grep -i error /tmp/locust_test/master.log | tail -5', timeout=10)
                    error_log = stdout.read().decode().strip()
                    if error_log:
                        logger.error(f"主节点错误日志: {error_log}")
                except Exception as e:
                    logger.warning(f"检查错误日志失败: {e}")
                return False
            except Exception as e:
                logger.warning(f"检查主节点状态失败: {e}")
                return False

        except Exception as e:
            logger.error(f"启动主节点失败: {e}")
            import traceback
            logger.error(f"详细错误: {traceback.format_exc()}")
            return False
    

    
    def _start_worker_nodes(self) -> bool:
        """Start every configured worker node concurrently.

        Fans out one ``_start_single_worker`` call per worker on a thread
        pool and waits for all of them to finish.

        Returns:
            bool: True only when every worker starts successfully; False
            when any worker fails, the worker list is empty, or an
            unexpected error occurs.
        """
        try:
            logger.info("_start_worker_nodes 被调用，开始批量启动所有 worker 节点...")
            # BUGFIX: guard against an empty worker list —
            # ThreadPoolExecutor(max_workers=0) raises ValueError, which was
            # previously swallowed by the broad except below.
            if not self.worker_servers:
                logger.error("没有可用的工作节点")
                return False
            master_host = self.master_server.host_ip
            master_port = self.master_server.locust_master_port or 5557

            # Launch all workers in parallel, one thread per worker.
            with ThreadPoolExecutor(max_workers=len(self.worker_servers)) as executor:
                futures = []
                for worker in self.worker_servers:
                    logger.info(f"准备启动 worker: {worker.host_ip}")
                    futures.append(
                        executor.submit(self._start_single_worker, worker, master_host, master_port)
                    )
                # Collect every result before judging overall success.
                results = [future.result() for future in as_completed(futures)]

            if all(results):
                logger.info("所有工作节点启动成功")
                return True
            logger.error("部分工作节点启动失败")
            return False

        except Exception as e:
            logger.error(f"启动工作节点失败: {e}")
            return False
    
    def _start_single_worker(self, worker, master_host: str, master_port: int) -> bool:
        """Start one Locust worker process on a remote host over SSH.

        Flow: verify Locust is installed, log environment diagnostics, kill
        stale worker processes bound to this master, probe network
        reachability to the master (nc, then telnet, then ping), launch the
        worker in the background, and finally confirm the process is running.

        Args:
            worker: server model instance; ``id`` keys into ``self.ssh_clients``
                and ``host_ip`` is used for logging.
            master_host: IP address of the master node the worker connects to.
            master_port: master bind port the worker connects to.

        Returns:
            bool: True when the worker process is observed running
            (via pgrep or ps), False otherwise.
        """
        try:
            ssh = self.ssh_clients.get(worker.id)
            if not ssh:
                logger.error(f"无法获取工作节点SSH连接: {worker.host_ip}")
                return False
            
            logger.info(f"开始启动工作节点: {worker.host_ip}")
            
            # Hard requirement: Locust must be installed on the worker host.
            stdin, stdout, stderr = ssh.exec_command('which locust')
            locust_path = stdout.read().decode().strip()
            if not locust_path:
                logger.error(f"工作节点 {worker.host_ip} 未安装Locust")
                logger.error("请在机器管理中测试连接后，通过弹窗安装Locust依赖")
                logger.error("安装命令: pip install locust==2.32.1")
                return False
            
            # Best-effort diagnostic: log the Locust version.
            try:
                stdin, stdout, stderr = ssh.exec_command(f'{locust_path} --version', timeout=10)
                version_output = stdout.read().decode().strip()
                logger.info(f"工作节点Locust版本: {version_output}")
            except Exception as e:
                logger.warning(f"检查Locust版本失败: {worker.host_ip} - {e}")
            
            # Best-effort diagnostic: log the Python version.
            try:
                stdin, stdout, stderr = ssh.exec_command('python3 --version', timeout=10)
                python_version = stdout.read().decode().strip()
                logger.info(f"工作节点Python版本: {python_version}")
            except Exception as e:
                logger.warning(f"检查Python版本失败: {worker.host_ip} - {e}")
            
            logger.info(f"工作节点Locust路径: {locust_path}")
            
            # Best-effort diagnostic: confirm the uploaded test files exist.
            try:
                stdin, stdout, stderr = ssh.exec_command('ls -la /tmp/locust_test/', timeout=10)
                file_list = stdout.read().decode().strip()
                logger.info(f"工作节点测试目录内容: {file_list}")
            except Exception as e:
                logger.warning(f"检查测试文件失败: {worker.host_ip} - {e}")
            
            # Stop only stale worker processes bound to *this* master so
            # workers serving other masters are left alone.
            logger.info(f"停止可能存在的Locust工作进程: {worker.host_ip}")
            try:
                # Narrow pkill pattern: worker processes that reference master_host.
                ssh.exec_command(f'pkill -f "locust.*worker.*{master_host}"', timeout=10)
                time.sleep(2)
            except Exception as e:
                logger.warning(f"停止进程失败: {worker.host_ip} - {e}")
            
            # Probe network reachability from worker to master before launching.
            logger.info(f"检查工作节点到主节点的网络连接: {worker.host_ip} -> {master_host}:{master_port}")
            
            # Tri-level fallback: nc -> telnet -> ping.
            network_ok = False
            
            # Attempt 1: nc port probe (preferred, checks the exact port).
            try:
                stdin, stdout, stderr = ssh.exec_command(f'nc -zv {master_host} {master_port}', timeout=10)
                nc_result = stdout.read().decode().strip()
                nc_error = stderr.read().decode().strip()
                logger.info(f"网络连接检查结果 (nc): {nc_result}")
                if nc_error:
                    logger.warning(f"网络连接检查错误 (nc): {nc_error}")
                if "succeeded" in nc_result or "open" in nc_result:
                    network_ok = True
                    logger.info(f"网络连接检查成功 (nc): {worker.host_ip} -> {master_host}:{master_port}")
            except Exception as e:
                logger.warning(f"网络连接检查失败 (nc): {worker.host_ip} - {e}")
            
            # Attempt 2: telnet, in case nc is not installed on the host.
            if not network_ok:
                try:
                    stdin, stdout, stderr = ssh.exec_command(f'telnet {master_host} {master_port}', timeout=10)
                    telnet_result = stdout.read().decode().strip()
                    logger.info(f"网络连接检查结果 (telnet): {telnet_result[:100]}...")
                    if "Connected" in telnet_result:
                        network_ok = True
                        logger.info(f"网络连接检查成功 (telnet): {worker.host_ip} -> {master_host}:{master_port}")
                except Exception as e:
                    logger.warning(f"网络连接检查失败 (telnet): {worker.host_ip} - {e}")
            
            # Attempt 3: ping for basic host reachability (diagnostic only —
            # note it does not set network_ok, and the launch proceeds either way).
            if not network_ok:
                try:
                    stdin, stdout, stderr = ssh.exec_command(f'ping -c 3 {master_host}', timeout=15)
                    ping_result = stdout.read().decode().strip()
                    logger.info(f"网络连通性检查结果 (ping): {ping_result[:100]}...")
                    if "3 packets transmitted, 3 received" in ping_result:
                        logger.info(f"基本网络连通性正常: {worker.host_ip} -> {master_host}")
                    else:
                        logger.warning(f"基本网络连通性异常: {worker.host_ip} -> {master_host}")
                except Exception as e:
                    logger.warning(f"网络连通性检查失败 (ping): {worker.host_ip} - {e}")
            
            # Log firewall state, a common cause of master<->worker issues.
            try:
                stdin, stdout, stderr = ssh.exec_command('systemctl status firewalld', timeout=10)
                firewall_status = stdout.read().decode().strip()
                logger.info(f"工作节点防火墙状态: {firewall_status[:200]}...")
            except Exception as e:
                logger.warning(f"检查防火墙状态失败: {worker.host_ip} - {e}")
            
            # Launch the worker step by step to avoid a single long timeout.
            logger.info(f"开始启动工作节点: {worker.host_ip}")
            
            # Step 1: sanity-check the test directory is accessible.
            # NOTE(review): each exec_command runs in a fresh shell, so this
            # `cd` does not persist — worker_cmd below cds again itself.
            stdin, stdout, stderr = ssh.exec_command('cd /tmp/locust_test', timeout=10)
            exit_status = stdout.channel.recv_exit_status()
            if exit_status != 0:
                logger.error(f"切换目录失败: {worker.host_ip}")
                return False
            
            # Step 2: start the Locust worker in the background.
            # Workers intentionally get no config file: run duration and user
            # counts are controlled by the master node.
            worker_cmd = (
                f"cd /tmp/locust_test && nohup {locust_path} -f locustfile.py "
                f"--worker --master-host={master_host} --master-port={master_port} "
                f"> worker.log 2>&1 &"
            )
            logger.info(f"执行工作节点启动命令: {worker_cmd}")
            stdin, stdout, stderr = ssh.exec_command(worker_cmd, timeout=15)
            exit_status = stdout.channel.recv_exit_status()
            
            if exit_status != 0:
                err = stderr.read().decode().strip()
                logger.error(f"工作节点启动命令失败: {worker.host_ip}, 错误: {err}")
                return False
            
            # Step 3: record the worker process PID (informational).
            stdin, stdout, stderr = ssh.exec_command('pgrep -f "locust.*worker"', timeout=10)
            pid = stdout.read().decode().strip()
            logger.info(f"工作节点进程PID: {pid}")
            
            # Give the worker time to start and register with the master.
            logger.info(f"等待工作节点启动: {worker.host_ip}")
            time.sleep(15)  # generous delay so the worker can connect
            
            # Confirm the process is alive using two independent probes.
            try:
                # Probe 1: pgrep scoped to workers of this master.
                stdin, stdout, stderr = ssh.exec_command(f'pgrep -f "locust.*worker.*{master_host}"', timeout=10)
                running_pid = stdout.read().decode().strip()
                logger.info(f"运行中的Locust工作进程PID (pgrep): {running_pid}")
                # Probe 2: ps listing of any locust worker process.
                stdin, stdout, stderr = ssh.exec_command('ps aux | grep "locust.*worker" | grep -v grep', timeout=10)
                ps_output = stdout.read().decode().strip()
                logger.info(f"运行中的Locust工作进程 (ps): {ps_output}")
            except Exception as e:
                logger.warning(f"检查进程状态失败: {worker.host_ip} - {e}")
                running_pid = ""
                ps_output = ""
            # Either probe finding the process counts as success.
            if running_pid or ps_output:
                logger.info(f"工作节点进程运行中 {worker.host_ip}: PID {running_pid}")
                return True
            else:
                logger.warning(f"工作节点进程未运行: {worker.host_ip}")
                return False
        except Exception as e:
            logger.error(f"启动工作节点失败 {worker.host_ip}: {e}")
            import traceback
            logger.error(f"详细错误: {traceback.format_exc()}")
            return False
    
    def start_distributed_test(self, task_id: int, master_server, worker_servers: list, env_id: int, test_config: dict) -> dict:
        """Kick off a distributed Locust test and return immediately.

        Orchestration (all in a background daemon thread): start the master
        and wait for its web service to be ready, then start the workers,
        wait for at least one worker to register with the master, trigger
        the swarm, and finally run a periodic data-collection loop that
        pushes stats into the report until the test stops.

        Args:
            task_id: performance task primary key.
            master_server: server model for the master node.
            worker_servers: list of server models for worker nodes.
            env_id: environment id for the test.
            test_config: dict with ``web_port``, ``users``, ``spawn_rate``,
                ``duration`` and optionally ``report_id``.

        Returns:
            dict: ``{'success': True, 'data': {...}}`` with GUI URLs and
            report id on launch, or ``{'success': False, 'error': ...}``.
        """
        import threading, time, requests
        from datetime import datetime
        try:
            self.task_id = task_id
            self.master_server = master_server
            self.worker_servers = worker_servers
            self.env_id = env_id
            self.web_port = test_config.get('web_port', 8100)
            self.users = test_config.get('users', 10)
            self.spawn_rate = test_config.get('spawn_rate', 1)
            self.duration = test_config.get('duration')
            
            # Debug logging for the incoming configuration.
            logger.info(f"分布式测试启动 - test_config内容: {test_config}")
            logger.info(f"分布式测试启动 - test_config.get('report_id'): {test_config.get('report_id')}")
            
            # report_id enables the periodic data-collection loop below;
            # without it, stats are not persisted.
            self.report_id = test_config.get('report_id')
            if self.report_id:
                logger.info(f"分布式测试设置报告ID: {self.report_id}")
            else:
                logger.warning("分布式测试未设置报告ID，数据收集功能将被禁用")
                logger.warning(f"test_config的所有键: {list(test_config.keys())}")
            
            # Background thread that builds the distributed environment.
            def distributed_env_thread():
                try:
                    # Start the master synchronously and wait for its web
                    # service to become ready.
                    master_ssh = self.ssh_clients.get(self.master_server.id)
                    if master_ssh:
                        self.port_manager = PortManager(master_ssh)
                    if not self.setup_distributed_test():
                        logger.error("分布式测试环境设置失败")
                        return
                    logger.info("主节点Web服务ready，开始启动worker...")
                    # Bring up all worker nodes.
                    worker_success = self._start_worker_nodes()
                    expected_workers = len(self.worker_servers)
                    if not worker_success:
                        logger.warning("部分工作节点启动失败，但主节点已正常运行")
                        logger.info("尝试启动测试（仅主节点模式）...")
                        self.start_test_with_params(self.users, self.spawn_rate, self.duration)
                        return
                    # Poll the master's stats endpoint until at least one
                    # worker has registered, then trigger the swarm.
                    master_url = f"http://{self.master_server.host_ip}:{self.web_port}"
                    max_wait = 60
                    interval = 3
                    waited = 0
                    while waited < max_wait:
                        try:
                            response = requests.get(f"{master_url}/stats/requests", timeout=10)
                            if response.status_code == 200:
                                data = response.json()
                                connected_workers = len(data.get('workers', []))
                                logger.info(f"已连接到主节点的工作节点数量: {connected_workers}/{expected_workers}")
                                if connected_workers > 0:
                                    logger.info("检测到有worker注册，自动启动压测...")
                                    self.start_test_with_params(self.users, self.spawn_rate, self.duration)
                                    break
                        except Exception as e:
                            logger.warning(f"检查工作节点状态失败: {e}")
                        time.sleep(interval)
                        waited += interval
                    else:
                        # while/else: loop ran out without a break -> timeout.
                        logger.error(f"等待worker注册超时({max_wait}s)，未能检测到worker，未自动swarm！")
                    # Inner loop: poll stats every 10s and push them into the
                    # report until self.test_running goes False or the test stops.
                    def periodic_data_collection():
                        try:
                            logger.info("启动定时数据收集...")
                            while self.test_running:
                                logger.info("定时收集循环... self.test_running=%s" % self.test_running)
                                try:
                                    if hasattr(self, 'report_id') and self.report_id:
                                        from performanceengine.taskResult import update_report_with_results, finalize_report
                                        stats_url = f"{master_url}/stats/requests"
                                        failures_url = f"{master_url}/stats/failures"
                                        exceptions_url = f"{master_url}/stats/exceptions"
                                        logger.info(f"准备请求性能数据: {stats_url}")
                                        try:
                                            resp = requests.get(stats_url, timeout=5)
                                            logger.info(f"性能数据接口响应: status={resp.status_code}")
                                            if resp.status_code == 200:
                                                stats_data = resp.json()
                                                # Log a truncated view of the payload structure.
                                                logger.info(f"Locust API返回的stats_data结构: {json.dumps(stats_data, indent=2)[:1000]}...")
                                                
                                                # Try to fetch percentile-rich stats (e.g. p90)
                                                # from the same endpoint with an extra flag.
                                                try:
                                                    detailed_stats_url = f"{master_url}/stats/requests?include_percentiles=true"
                                                    detailed_resp = requests.get(detailed_stats_url, timeout=5)
                                                    if detailed_resp.status_code == 200:
                                                        detailed_stats = detailed_resp.json()
                                                        logger.info(f"详细统计数据: {json.dumps(detailed_stats, indent=2)[:500]}...")
                                                        # Prefer the richer payload when it has stats.
                                                        if detailed_stats.get("stats"):
                                                            stats_data = detailed_stats
                                                except Exception as e:
                                                    logger.warning(f"获取详细统计数据失败: {e}")
                                                
                                                # Inspect the first stats entry's shape (debug aid).
                                                if stats_data.get("stats") and len(stats_data["stats"]) > 0:
                                                    first_stat = stats_data["stats"][0]
                                                    logger.info(f"第一个统计项的字段: {list(first_stat.keys())}")
                                                    logger.info(f"第一个统计项的完整数据: {json.dumps(first_stat, indent=2)}")
                                                
                                                # Pull failures and exceptions (best-effort).
                                                failures_data = None
                                                exceptions_data = None
                                                try:
                                                    failures_resp = requests.get(failures_url, timeout=5)
                                                    if failures_resp.status_code == 200:
                                                        failures_data = failures_resp.json()
                                                except Exception as e:
                                                    logger.warning(f"拉取failures失败: {e}")
                                                try:
                                                    exceptions_resp = requests.get(exceptions_url, timeout=5)
                                                    if exceptions_resp.status_code == 200:
                                                        exceptions_data = exceptions_resp.json()
                                                except Exception as e:
                                                    logger.warning(f"拉取exceptions失败: {e}")
                                                final_stats = self._build_final_stats(stats_data, failures_data, exceptions_data)
                                                logger.info(f"处理后的final_stats结构: {json.dumps(final_stats, indent=2)[:1000]}...")
                                                update_report_with_results(self.report_id, final_stats, master_url)
                                                # When Locust reports 'stopped', finalize the
                                                # report and end the collection loop.
                                                if stats_data.get('state') == 'stopped':
                                                    logger.info(f"检测到压测已结束，自动finalize_report: {self.report_id}")
                                                    finalize_report(self.report_id, success=True, final_stats=final_stats)
                                                    self.test_running = False
                                                    break
                                            else:
                                                logger.warning(f"性能数据接口返回非200: {resp.status_code}")
                                        except Exception as e:
                                            logger.error(f"性能数据接口请求异常: {e}")
                                    else:
                                        logger.warning("report_id未设置，跳过本轮收集")
                                        logger.warning(f"hasattr(self, 'report_id'): {hasattr(self, 'report_id')}")
                                        logger.warning(f"self.report_id: {getattr(self, 'report_id', 'NOT_SET')}")
                                    time.sleep(10)
                                except Exception as e:
                                    logger.error(f"定时数据收集异常: {e}")
                                    time.sleep(10)
                        except Exception as e:
                            logger.error(f"定时数据收集线程异常: {e}")
                    collection_thread = threading.Thread(target=periodic_data_collection)
                    collection_thread.daemon = True
                    collection_thread.start()
                except Exception as e:
                    logger.error(f"分布式环境线程异常: {e}")
            t = threading.Thread(target=distributed_env_thread)
            t.daemon = True
            t.start()
            self.test_running = True
            # Respond immediately; environment setup continues in background.
            return {
                'success': True,
                'message': '分布式测试启动成功，正在后台初始化环境',
                'data': {
                    'master_server': {
                        'id': master_server.id,
                        'name': master_server.name,
                        'host_ip': master_server.host_ip
                    },
                    'worker_count': len(worker_servers),
                    'gui_url': f'http://{master_server.host_ip}:{self.web_port}',
                    'local_gui_url': f'http://local_ip:{self.web_port}',
                    'web_port': self.web_port,
                    'report_id': getattr(self, 'report_id', None)
                }
            }
        except Exception as e:
            import traceback
            traceback.print_exc()
            return {'success': False, 'error': str(e)}
    
    def _update_task_status(self, status: str, status_label: str) -> None:
        """Best-effort update of this task's PerformanceTask row to *status*.

        Failures are logged and swallowed so a DB hiccup can never abort
        the surrounding start/stop workflow.
        """
        try:
            from performance.models import PerformanceTask
            task = PerformanceTask.objects.get(id=self.task_id)
            task.status = status
            task.save()
            logger.info(f"已更新任务状态为'{status_label}': {self.task_id}")
        except Exception as e:
            logger.error(f"更新任务状态失败: {e}")

    def _diagnose_master_network(self) -> None:
        """Log the master's port/firewall/iptables state over SSH (best effort)."""
        logger.info("开始网络连接诊断...")
        try:
            ssh = self.ssh_clients.get(self.master_server.id)
            if ssh:
                # Is anything listening on the web port?
                stdin, stdout, stderr = ssh.exec_command(f'netstat -tlnp | grep :{self.web_port}', timeout=10)
                netstat_output = stdout.read().decode().strip()
                logger.info(f"服务器端口 {self.web_port} 监听状态: {netstat_output}")

                # Firewall state (truncated — only the head is interesting).
                stdin, stdout, stderr = ssh.exec_command('systemctl status firewalld', timeout=10)
                firewall_status = stdout.read().decode().strip()
                logger.info(f"防火墙状态: {firewall_status[:200]}...")

                # Any iptables rule mentioning the web port.
                stdin, stdout, stderr = ssh.exec_command(f'iptables -L -n | grep {self.web_port}', timeout=10)
                iptables_output = stdout.read().decode().strip()
                logger.info(f"iptables规则 (端口 {self.web_port}): {iptables_output}")
        except Exception as e:
            logger.warning(f"网络诊断失败: {e}")

    def _wait_for_master_web(self, master_url: str, max_retries: int = 15, interval: int = 2) -> bool:
        """Poll the master web UI root until it answers HTTP 200.

        Returns True when reachable within max_retries * interval seconds,
        False on timeout. Every third failed attempt dumps server-side
        process/port/log diagnostics over SSH.
        """
        retry_count = 0
        while retry_count < max_retries:
            try:
                response = requests.get(f"{master_url}/", timeout=5)
                if response.status_code == 200:
                    logger.info(f"主节点Web服务已启动: {master_url}")
                    return True
            except requests.exceptions.RequestException as e:
                logger.info(f"等待主节点Web服务启动... ({retry_count + 1}/{max_retries}) - {e}")
                if retry_count % 3 == 0:
                    try:
                        ssh = self.ssh_clients.get(self.master_server.id)
                        if ssh:
                            stdin, stdout, stderr = ssh.exec_command('pgrep -f "locust.*master"', timeout=10)
                            pid = stdout.read().decode().strip()
                            logger.info(f"Locust主进程PID: {pid}")

                            stdin, stdout, stderr = ssh.exec_command(f'netstat -tlnp | grep :{self.web_port}', timeout=10)
                            port_status = stdout.read().decode().strip()
                            logger.info(f"端口 {self.web_port} 状态: {port_status}")

                            stdin, stdout, stderr = ssh.exec_command('tail -5 /tmp/locust_test/master.log', timeout=10)
                            log_tail = stdout.read().decode().strip()
                            if log_tail:
                                logger.info(f"主节点日志尾部: {log_tail}")
                    except Exception as diag_e:
                        logger.warning(f"诊断检查失败: {diag_e}")

            retry_count += 1
            time.sleep(interval)
        return False

    def start_test_with_params(self, users: int, spawn_rate: int, duration: int = None) -> bool:
        """Start the distributed test through the master node's web API.

        Args:
            users: total number of simulated users to spawn.
            spawn_rate: users started per second.
            duration: optional run time in seconds; when given, a background
                timer stops the test automatically.

        Returns:
            True when the master accepted the /swarm request; False on web
            service timeout or any error (the task is marked failed, '99').
        """
        try:
            # Mark the task as running ('1') before touching the cluster.
            self._update_task_status('1', '执行中')

            master_url = f"http://{self.master_server.host_ip}:{self.web_port}"
            logger.info(f"等待主节点Web服务启动: {master_url}")

            self._diagnose_master_network()

            # Poll the web UI for up to ~30s (15 tries, 2s apart).
            if not self._wait_for_master_web(master_url, max_retries=15, interval=2):
                logger.error(f"主节点Web服务启动超时: {master_url}")
                self._update_task_status('99', '执行失败')
                return False

            start_data = {
                'user_count': users,
                'spawn_rate': spawn_rate
            }

            logger.info(f"开始启动分布式测试: {start_data}")
            # BUGFIX: Locust's /swarm endpoint reads form-encoded fields
            # (request.form), not a JSON body — `json=` was silently ignored
            # by the master, so the payload must be sent via `data=`.
            response = requests.post(f"{master_url}/swarm", data=start_data, timeout=10)

            if response.status_code == 200:
                self.test_running = True
                logger.info(f"分布式测试启动成功 - 用户数: {users}, 启动速率: {spawn_rate}")

                # Auto-stop after the requested duration, if any.
                if duration:
                    threading.Timer(duration, self.stop_distributed_test).start()

                return True
            else:
                logger.error(f"分布式测试启动失败: {response.text}")
                self._update_task_status('99', '执行失败')
                return False

        except Exception as e:
            logger.error(f"启动分布式测试失败: {e}")
            self._update_task_status('99', '执行失败')
            return False
    
    def stop_distributed_test(self) -> bool:
        """Stop the distributed test via the master node's /stop endpoint.

        On success the final results are collected and the task is marked
        completed ('0'); on any failure the task is marked failed ('99').

        Returns:
            True when the test was stopped (or was not running), else False.
        """
        try:
            # Nothing to do when no test is active.
            if not self.test_running:
                return True

            master_url = f"http://{self.master_server.host_ip}:{self.web_port}"

            # BUGFIX: bounded timeout — the original call had none and could
            # block this thread forever if the master became unresponsive.
            response = requests.post(f"{master_url}/stop", timeout=10)

            if response.status_code == 200:
                self.test_running = False
                logger.info("分布式测试停止成功")

                # Pull final stats and close out the report.
                self.finalize_test_results()

                # Best-effort DB status update: '0' = completed.
                try:
                    from performance.models import PerformanceTask
                    task = PerformanceTask.objects.get(id=self.task_id)
                    task.status = '0'
                    task.save()
                    logger.info(f"已更新任务状态为'执行完成': {self.task_id}")
                except Exception as e:
                    logger.error(f"更新任务状态失败: {e}")

                return True
            else:
                logger.error(f"分布式测试停止失败: {response.text}")

                # Best-effort DB status update: '99' = failed.
                try:
                    from performance.models import PerformanceTask
                    task = PerformanceTask.objects.get(id=self.task_id)
                    task.status = '99'
                    task.save()
                    logger.info(f"已更新任务状态为'执行失败': {self.task_id}")
                except Exception as e:
                    logger.error(f"更新任务状态失败: {e}")

                return False

        except Exception as e:
            logger.error(f"停止分布式测试失败: {e}")

            # Best-effort DB status update: '99' = failed.
            try:
                from performance.models import PerformanceTask
                task = PerformanceTask.objects.get(id=self.task_id)
                task.status = '99'
                task.save()
                logger.info(f"已更新任务状态为'执行失败': {self.task_id}")
            except Exception as ex:
                logger.error(f"更新任务状态失败: {ex}")

            return False
    
    def _collect_test_results(self):
        """Fetch stats/failures/exceptions from the master web API and cache them.

        Populates ``self.test_results`` with the normalized stats and, when a
        report id is set, pushes the results into the report. Each fetch is
        individually guarded so one failing endpoint does not abort the rest;
        only the /stats/requests payload is mandatory.
        """
        try:
            master_url = f"http://{self.master_server.host_ip}:{self.web_port}"
            stats_data = None
            failures_data = None
            exceptions_data = None

            # Request statistics (mandatory).
            try:
                # BUGFIX: timeouts keep a dead master from hanging the
                # collector thread — the original calls had none.
                response = requests.get(f"{master_url}/stats/requests", timeout=10)
                if response.status_code == 200:
                    stats_data = response.json()
            except Exception as e:
                logger.error(f"拉取stats/requests失败: {e}")

            # Failure statistics (optional).
            try:
                response = requests.get(f"{master_url}/stats/failures", timeout=10)
                if response.status_code == 200:
                    failures_data = response.json()
            except Exception as e:
                logger.warning(f"拉取stats/failures失败: {e}")

            # Exception statistics (optional).
            try:
                response = requests.get(f"{master_url}/stats/exceptions", timeout=10)
                if response.status_code == 200:
                    exceptions_data = response.json()
            except Exception as e:
                logger.warning(f"拉取stats/exceptions失败: {e}")

            if stats_data:
                final_stats = self._build_final_stats(stats_data, failures_data, exceptions_data)
                self.test_results = final_stats
                if hasattr(self, 'report_id') and self.report_id:
                    try:
                        from performanceengine.taskResult import update_report_with_results
                        update_report_with_results(self.report_id, final_stats, f"http://{self.master_server.host_ip}:{self.web_port}")
                        logger.info(f"报告结果更新成功: {self.report_id}")
                    except Exception as e:
                        logger.error(f"更新报告结果失败: {e}")
            else:
                logger.error("测试结果收集失败: stats_data为空")
        except Exception as e:
            logger.error(f"收集测试结果失败: {e}")
    
    def finalize_test_results(self):
        """Collect the final stats and mark the associated report as finished.

        No-op when no report id has been assigned to this manager.
        """
        try:
            report_id = getattr(self, 'report_id', None)
            if not report_id:
                return

            from performanceengine.taskResult import finalize_report

            # Refresh self.test_results with the latest master-side data.
            self._collect_test_results()

            finished = finalize_report(
                report_id,
                success=True,
                final_stats=self.test_results
            )
            if finished:
                logger.info(f"测试报告完成: {report_id}")
            else:
                logger.warning(f"测试报告完成失败: {report_id}")
        except Exception as e:
            logger.error(f"完成测试结果失败: {e}")
    
    def cleanup_distributed_test(self):
        """Kill remote locust processes, remove work dirs, and drop all SSH sessions."""
        try:
            # Master first, then every worker.
            for server in [self.master_server, *self.worker_servers]:
                client = self.ssh_clients.get(server.id)
                if not client:
                    continue
                client.exec_command('pkill -f "locust"')
                client.exec_command('rm -rf /tmp/locust_test')

            # Tear down every SSH session and forget the handles.
            for client in self.ssh_clients.values():
                client.close()
            self.ssh_clients.clear()

            logger.info("分布式测试环境清理完成")

        except Exception as e:
            logger.error(f"清理分布式测试环境失败: {e}")
    
    def get_test_status(self) -> Dict:
        """获取测试状态"""
        try:
            if not self.test_running:
                return {
                    'status': 'stopped',
                    'message': '测试未运行',
                    'results': self.test_results
                }
            
            # 通过主节点的Web API获取测试状态
            master_url = f"http://{self.master_server.host_ip}:{self.web_port}"
            
            try:
                response = requests.get(f"{master_url}/stats/requests")
                
                if response.status_code == 200:
                    stats = response.json()
                    return {
                        'status': 'running',
                        'message': '测试运行中',
                        'stats': stats
                    }
                else:
                    return {
                        'status': 'error',
                        'message': f'获取测试状态失败: {response.text}'
                    }
            except Exception as e:
                return {
                    'status': 'error',
                    'message': f'获取测试状态失败: {str(e)}'
                }
                
        except Exception as e:
            return {
                'status': 'error',
                'message': f'获取测试状态失败: {str(e)}'
            }

    def _estimate_p90_percentile(self, p50, p95):
        """基于p50和p95估算p90百分位数"""
        if p50 == 0 or p95 == 0:
            return 0
        
        # 使用线性插值估算p90
        # p50对应50%，p95对应95%，p90在它们之间
        # 使用公式: p90 = p50 + (p95 - p50) * (90-50)/(95-50) = p50 + (p95 - p50) * 0.89
        p90 = p50 + (p95 - p50) * 0.89
        return round(p90, 2)
    
    def _build_final_stats(self, stats_data, failures_data=None, exceptions_data=None):
        """Normalize a Locust 2.x+ /stats/requests payload into the report schema.

        The "Aggregated" row becomes the ``total`` section; every other named
        row goes into ``detailed_stats``. p90 is interpolated from p50/p95
        because Locust does not report it directly.
        """
        # The reported user count may be None or non-numeric — coerce safely.
        raw_users = stats_data.get("user_count", 0)
        try:
            current_users = 0 if raw_users is None else int(raw_users)
        except (ValueError, TypeError):
            current_users = 0

        # Aggregated percentiles live in a separate top-level dict.
        agg_pct = stats_data.get("current_response_time_percentiles", {})

        def failure_pct(row):
            # Failed-request percentage; max() guards against division by zero.
            return (row.get('num_failures', 0) / max(row.get('num_requests', 1), 1)) * 100

        total = {}
        detailed = {}
        for row in stats_data.get("stats", []):
            row_name = row.get("name")
            if row_name == "Aggregated":
                # Overall statistics, percentiles taken from agg_pct.
                p50 = agg_pct.get('response_time_percentile_0.5', row.get('median_response_time', 0))
                p95 = agg_pct.get('response_time_percentile_0.95', 0)
                total = {
                    'num_requests': row.get('num_requests', 0),
                    'num_failures': row.get('num_failures', 0),
                    'avg_response_time': row.get('avg_response_time', 0),
                    'min_response_time': row.get('min_response_time', 0),
                    'max_response_time': row.get('max_response_time', 0),
                    'median_response_time': row.get('median_response_time', 0),
                    'current_rps': row.get('current_rps', 0),
                    'current_fails_per_sec': row.get('current_fails_per_sec', 0),
                    'p50_response_time': p50,
                    'p90_response_time': self._estimate_p90_percentile(p50, p95),
                    'p95_response_time': p95,
                    'p99_response_time': agg_pct.get('response_time_percentile_0.99', 0),
                    'current_users': current_users,
                    'current_tps': row.get('current_rps', 0),
                    'error_rate': failure_pct(row),
                    'elapsed_time': stats_data.get('elapsed_time', 0)
                }
            elif row_name:
                # Per-endpoint statistics, percentiles taken from the row itself.
                p50 = row.get('response_time_percentile_0.5', row.get('median_response_time', 0))
                p95 = row.get('response_time_percentile_0.95', 0)
                detailed[row_name] = {
                    'name': row.get('name', ''),
                    'method': row.get('method', ''),
                    'path': row.get('name', ''),
                    'num_requests': row.get('num_requests', 0),
                    'num_failures': row.get('num_failures', 0),
                    'success_requests': row.get('num_requests', 0) - row.get('num_failures', 0),
                    'avg_response_time': row.get('avg_response_time', 0),
                    'min_response_time': row.get('min_response_time', 0),
                    'max_response_time': row.get('max_response_time', 0),
                    'median_response_time': row.get('median_response_time', 0),
                    'p50_response_time': p50,
                    'p90_response_time': self._estimate_p90_percentile(p50, p95),
                    'p95_response_time': p95,
                    'p99_response_time': row.get('response_time_percentile_0.99', 0),
                    'current_rps': row.get('current_rps', 0),
                    'error_rate': failure_pct(row),
                    'current_users': current_users,
                    'current_tps': row.get('current_rps', 0)
                }

        final_stats = {
            "total": total,
            "detailed_stats": detailed,
        }
        if failures_data is not None:
            final_stats["failures"] = failures_data
        if exceptions_data is not None:
            final_stats["exceptions"] = exceptions_data

        # Stamp with Django's timezone-aware clock (matches other report rows).
        from django.utils import timezone
        final_stats['timestamp'] = timezone.now().isoformat()

        return final_stats


# 全局分布式测试管理器实例
_distributed_manager_instances = {}


def get_distributed_manager(task_id: Optional[int] = None) -> DistributedTestManager:
    """Return the (cached) distributed test manager for *task_id*.

    Managers are cached per task so repeated calls share SSH sessions and
    test state. When no task_id is given, a throwaway manager with the
    sentinel task id 0 is returned and never cached.

    Note: annotation fixed from the invalid implicit-Optional
    ``task_id: int = None`` to ``Optional[int]`` (PEP 484).
    """
    global _distributed_manager_instances

    if task_id is None:
        # Temporary, uncached instance; 0 acts as a placeholder task id.
        return DistributedTestManager(0)

    if task_id not in _distributed_manager_instances:
        _distributed_manager_instances[task_id] = DistributedTestManager(task_id)

    return _distributed_manager_instances[task_id]


def cleanup_distributed_manager(task_id: int):
    """Tear down the cached manager for *task_id* and drop it from the registry."""
    manager = _distributed_manager_instances.get(task_id)
    if manager is not None:
        manager.cleanup_distributed_test()
        del _distributed_manager_instances[task_id]