#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Auto-sklearn手动Python并行计算示例
Manual Python Parallel Computing Example

本示例演示如何使用Python代码手动控制Dask分布式集群，实现Auto-sklearn的并行训练。
通过代码方式启动调度器和工作节点，提供更灵活的分布式控制能力。

作者: AutoML学习团队
日期: 2024-07-13
"""

import asyncio
import multiprocessing
import os
import time
import warnings
from datetime import datetime
from pprint import pprint

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
from sklearn.metrics import classification_report, confusion_matrix

import dask
import dask.distributed
from autosklearn.classification import AutoSklearnClassifier
from autosklearn.constants import MULTICLASS_CLASSIFICATION

# Suppress warning output for demo readability (hides sklearn/dask deprecation notices).
warnings.filterwarnings('ignore')

# Configure matplotlib so Chinese labels render (SimHei first, DejaVu Sans fallback)
# and minus signs still display correctly with a non-ASCII font.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False


class DistributedClusterManager:
    """
    Distributed cluster manager.

    Owns the full lifecycle of a Dask distributed cluster: starting the
    scheduler, manually spawning worker processes, connecting a client,
    and tearing everything down again.
    """

    def __init__(self, n_workers=2, worker_memory_limit='2GB', threads_per_worker=1):
        """
        Initialize the cluster manager.

        Parameters:
            n_workers (int): number of worker processes to spawn
            worker_memory_limit (str): intended per-worker memory limit.
                NOTE(review): currently informational only — the Nanny is
                started with memory_limit=0 (Dask memory management
                disabled); confirm whether it should be wired through.
            threads_per_worker (int): threads per worker process
        """
        self.n_workers = n_workers
        self.worker_memory_limit = worker_memory_limit
        self.threads_per_worker = threads_per_worker
        self.cluster = None          # dask LocalCluster (scheduler only)
        self.client = None           # dask Client connected to the scheduler
        self.worker_processes = []   # multiprocessing.Process handles we spawned
        self.scheduler_info = {}     # summary filled in by _verify_cluster_status

        print(f"🎯 初始化分布式集群管理器")
        print(f"   工作节点数量: {n_workers}")
        print(f"   内存限制: {worker_memory_limit}")
        print(f"   线程数/节点: {threads_per_worker}")

    def start_cluster(self):
        """
        Start the distributed cluster.

        Creates a local cluster with no workers, manually spawns the worker
        processes, then establishes a client connection and verifies that
        the expected number of workers registered.

        Raises:
            Exception: re-raised (after cleanup) if any startup step fails.
        """
        print(f"\n🏗️  启动分布式集群")

        try:
            # 1. Scheduler only — workers are spawned manually below.
            print(f"   创建调度器...")
            self.cluster = dask.distributed.LocalCluster(
                n_workers=0,                      # do not auto-create workers
                processes=True,                   # process-based, not thread-based
                threads_per_worker=self.threads_per_worker,
                silence_logs=False,               # keep logs visible for debugging
                dashboard_address=':8787',        # dashboard port
            )

            scheduler_address = self.cluster.scheduler_address
            print(f"   ✅ 调度器启动成功: {scheduler_address}")
            print(f"   📊 仪表板地址: http://localhost:8787")

            # 2. Spawn the worker processes.
            print(f"   启动{self.n_workers}个工作节点...")
            self._spawn_workers(scheduler_address)

            # Give the workers a moment to register with the scheduler.
            time.sleep(2)

            # 3. Connect a client to the scheduler.
            print(f"   创建客户端连接...")
            self.client = dask.distributed.Client(
                address=scheduler_address,
                timeout='10s'
            )

            # 4. Verify the expected workers are connected.
            self._verify_cluster_status()

            print(f"   ✅ 分布式集群启动完成!")

        except Exception as e:
            print(f"   ❌ 集群启动失败: {str(e)}")
            self.cleanup()
            raise

    def _spawn_workers(self, scheduler_address):
        """
        Spawn the worker processes.

        The process target is a staticmethod and every value it needs is
        passed through ``args``: under the 'spawn' start method (the default
        on macOS/Windows) the target and args are pickled, and a bound
        method would drag ``self`` — including the unpicklable
        LocalCluster — into the pickle and crash at Process.start().

        Parameters:
            scheduler_address (str): scheduler address workers connect to
        """
        for i in range(self.n_workers):
            worker_process = multiprocessing.Process(
                target=DistributedClusterManager._start_worker,
                args=(scheduler_address, i, self.threads_per_worker),
                name=f"DaskWorker-{i}"
            )
            worker_process.start()
            self.worker_processes.append(worker_process)
            print(f"     Worker-{i} 进程启动: PID={worker_process.pid}")

    @staticmethod
    def _start_worker(scheduler_address, worker_id, threads_per_worker):
        """
        Entry point of a single worker process.

        Runs a Dask Nanny inside its own asyncio event loop until the
        worker shuts down.

        Parameters:
            scheduler_address (str): scheduler address
            worker_id (int): ordinal used to name the worker
            threads_per_worker (int): threads for this worker
        """
        # Workers must be allowed to fork their own subprocesses.
        dask.config.set({"distributed.worker.daemon": False})

        async def run_worker():
            """Run the Nanny-managed worker until it finishes."""
            try:
                async with dask.distributed.Nanny(
                    scheduler_ip=scheduler_address,
                    nthreads=threads_per_worker,
                    lifetime=60,                  # 60-second lifetime (demo only)
                    memory_limit=0,               # disable Dask memory management
                    name=f"worker-{worker_id}",
                    silence_logs=False
                ) as worker:
                    print(f"     Worker-{worker_id} 已连接到调度器")
                    await worker.finished()

            except Exception as e:
                print(f"     Worker-{worker_id} 启动失败: {str(e)}")

        # Drive the async worker with a fresh event loop.
        try:
            asyncio.run(run_worker())
        except Exception as e:
            print(f"     Worker-{worker_id} 异步运行失败: {str(e)}")

    def _verify_cluster_status(self):
        """Poll the scheduler until the expected workers connect (waits up
        to ~10 seconds), then record a status summary in self.scheduler_info."""
        if self.client:
            connected_workers = 0
            max_wait = 10  # at most 10 seconds of polling

            for _ in range(max_wait):
                worker_info = self.client.scheduler_info()['workers']
                connected_workers = len(worker_info)

                if connected_workers >= self.n_workers:
                    break

                time.sleep(1)

            # Summary consumed later by get_cluster_info().
            self.scheduler_info = {
                'address': self.cluster.scheduler_address,
                'workers': connected_workers,
                'expected_workers': self.n_workers,
                'dashboard': 'http://localhost:8787'
            }

            print(f"   📊 集群状态:")
            print(f"     连接的工作节点: {connected_workers}/{self.n_workers}")
            print(f"     调度器地址: {self.cluster.scheduler_address}")

    def get_client(self):
        """
        Return the Dask client.

        Returns:
            dask.distributed.Client: connected client instance

        Raises:
            RuntimeError: if the cluster/client has not been started yet.
        """
        if not self.client:
            raise RuntimeError("集群未启动或客户端未初始化")
        return self.client

    def get_cluster_info(self):
        """
        Return the current cluster state.

        Returns:
            dict: scheduler summary, live worker info, task stream, and a
            coarse status string; {'status': 'not_started'} before startup.
        """
        if self.client:
            return {
                'scheduler_info': self.scheduler_info,
                'worker_info': self.client.scheduler_info()['workers'],
                'task_stream': self.client.scheduler_info().get('task_stream', []),
                'status': 'running' if self.client.status == 'running' else 'error'
            }
        return {'status': 'not_started'}

    def cleanup(self):
        """Tear down client, cluster, and worker processes. Best effort:
        failures are reported but never raised."""
        print(f"\n🧹 清理分布式集群资源")

        # Close the client first so it stops talking to the scheduler.
        if self.client:
            try:
                self.client.close()
                print(f"   ✅ 客户端已关闭")
            except Exception as e:
                print(f"   ⚠️  客户端关闭失败: {str(e)}")

        # Shut down the scheduler/cluster.
        if self.cluster:
            try:
                self.cluster.close()
                print(f"   ✅ 集群已关闭")
            except Exception as e:
                print(f"   ⚠️  集群关闭失败: {str(e)}")

        # Terminate any worker processes we spawned (kill if they linger).
        for i, process in enumerate(self.worker_processes):
            try:
                if process.is_alive():
                    process.terminate()
                    process.join(timeout=5)
                    if process.is_alive():
                        process.kill()
                print(f"   ✅ Worker-{i} 进程已终止")
            except Exception as e:
                print(f"   ⚠️  Worker-{i} 终止失败: {str(e)}")

        print(f"   🎉 资源清理完成")


class DistributedAutoMLCoordinator:
    """
    Coordinator for distributed Auto-sklearn runs.

    Wraps an AutoSklearnClassifier configured against a Dask client and
    tracks training/ensemble timing plus model summaries.
    """

    def __init__(self, dask_client, tmp_folder="/tmp/distributed_automl"):
        """
        Set up the coordinator and ensure the scratch folder exists.

        Parameters:
            dask_client: connected dask.distributed.Client
            tmp_folder (str): scratch directory for Auto-sklearn output
        """
        self.dask_client = dask_client
        self.tmp_folder = tmp_folder
        self.automl = None          # set by create_distributed_automl()
        self.training_history = {}  # timing/shape bookkeeping from distributed_fit()

        os.makedirs(tmp_folder, exist_ok=True)

        print("🎯 初始化分布式AutoML协调器")
        print(f"   临时文件夹: {tmp_folder}")

    def create_distributed_automl(self, time_budget=60, **kwargs):
        """
        Build the AutoSklearnClassifier wired to the Dask cluster.

        Parameters:
            time_budget (int): overall time budget in seconds
            **kwargs: overrides merged on top of the default configuration

        Returns:
            AutoSklearnClassifier: the configured (unfitted) instance
        """
        print("\n🏗️  创建分布式AutoML实例")

        # Defaults; user-supplied kwargs win over these.
        settings = dict(
            time_left_for_this_task=time_budget,
            per_run_time_limit=max(10, time_budget // 6),
            memory_limit=2048,
            tmp_folder=self.tmp_folder,
            # Distributed execution — dask_client supersedes n_jobs.
            dask_client=self.dask_client,
            n_jobs=1,
            # Restricted search space for the demo.
            include_estimators=[
                'extra_trees',
                'random_forest',
                'gradient_boosting',
                'sgd'
            ],
            include_preprocessors=['no_preprocessing'],
            # Simple holdout validation.
            resampling_strategy='holdout',
            resampling_strategy_arguments={'train_size': 0.67},
            # Miscellaneous.
            delete_tmp_folder_after_terminate=False,
            disable_evaluator_output=False,
            seed=42,
        )
        settings.update(kwargs)

        self.automl = AutoSklearnClassifier(**settings)

        print("   ✅ 分布式AutoML实例创建完成")
        print(f"   时间预算: {time_budget}秒")
        print(f"   工作节点: {len(self.dask_client.scheduler_info()['workers'])}个")

        return self.automl

    def distributed_fit(self, X_train, y_train, dataset_name="distributed_dataset"):
        """
        Run the distributed model search.

        Parameters:
            X_train: training features
            y_train: training labels
            dataset_name (str): name recorded with the run

        Raises:
            ValueError: if create_distributed_automl() was not called first.
        """
        if self.automl is None:
            raise ValueError("请先创建AutoML实例")

        print("\n🚀 开始分布式训练")
        print(f"   数据集: {dataset_name}")
        print(f"   训练样本: {X_train.shape[0]}")
        print(f"   特征数量: {X_train.shape[1]}")

        start_stamp = time.time()

        try:
            self.automl.fit(X_train, y_train, dataset_name=dataset_name)

            end_stamp = time.time()
            elapsed = end_stamp - start_stamp

            # Bookkeeping consumed later by PerformanceAnalyzer.
            self.training_history = {
                'dataset_name': dataset_name,
                'training_time': elapsed,
                'start_time': datetime.fromtimestamp(start_stamp),
                'end_time': datetime.fromtimestamp(end_stamp),
                'data_shape': X_train.shape,
                'worker_count': len(self.dask_client.scheduler_info()['workers'])
            }

            print("   ✅ 分布式训练完成")
            print(f"   训练时间: {elapsed:.2f}秒")

        except Exception as e:
            print(f"   ❌ 分布式训练失败: {str(e)}")
            raise

    def distributed_ensemble_fit(self, y_train, **kwargs):
        """
        Build the final ensemble on the cluster.

        Parameters:
            y_train: training labels
            **kwargs: overrides for the ensemble configuration

        Raises:
            ValueError: if no AutoML instance exists yet.
        """
        if self.automl is None:
            raise ValueError("请先创建AutoML实例并完成训练")

        print("\n🔧 开始分布式集成构建")

        # Defaults; user kwargs take precedence.
        opts = {
            'task': MULTICLASS_CLASSIFICATION,
            'dataset_name': self.training_history.get('dataset_name', 'unknown'),
            'ensemble_kwargs': {'ensemble_size': 20},
            'ensemble_nbest': 50
        }
        opts.update(kwargs)

        try:
            started = time.time()

            self.automl.fit_ensemble(y_train, **opts)

            elapsed = time.time() - started
            self.training_history['ensemble_time'] = elapsed

            print("   ✅ 分布式集成构建完成")
            print(f"   集成时间: {elapsed:.2f}秒")

        except Exception as e:
            print(f"   ❌ 分布式集成构建失败: {str(e)}")
            raise

    def get_distributed_models(self):
        """
        Summarize the models produced by the distributed run.

        Returns:
            dict: model count, model mapping, and algorithm diversity;
            empty dict when no AutoML instance exists.
        """
        if self.automl is None:
            return {}

        empty = {'model_count': 0, 'models': {}, 'algorithm_diversity': 0}
        try:
            models = self.automl.show_models()
            if not models:
                return empty
            kinds = {type(entry).__name__ for entry in models.values()}
            return {
                'model_count': len(models),
                'models': dict(models),
                'algorithm_diversity': len(kinds)
            }
        except Exception:
            return empty

    def get_training_statistics(self):
        """
        Fetch Auto-sklearn's run statistics.

        Returns:
            str: statistics text, or an explanatory message on failure.
        """
        if self.automl is None:
            return "AutoML实例未创建"

        try:
            return self.automl.sprint_statistics()
        except Exception as e:
            return f"统计信息获取失败: {str(e)}"

class PerformanceAnalyzer:
    """
    Performance analyzer for distributed training.

    Derives parallel-efficiency, resource-usage, and communication-overhead
    estimates from a coordinator's training history and the cluster state,
    and renders them as a text report.
    """

    def __init__(self):
        """Create an analyzer with no results yet."""
        self.analysis_results = {}  # filled by analyze_distributed_performance()

    def analyze_distributed_performance(self, coordinator, cluster_manager,
                                      baseline_time=None):
        """
        Analyze distributed-training performance.

        Parameters:
            coordinator: distributed coordinator exposing training_history
            cluster_manager: cluster manager exposing get_cluster_info()
            baseline_time (float): single-machine reference time (optional)

        Returns:
            dict: computed performance metrics (empty if no history)
        """
        print("\n📊 分布式性能分析")

        history = coordinator.training_history
        cluster_state = cluster_manager.get_cluster_info()

        if not history:
            print("   ⚠️  无训练历史数据")
            return {}

        # Core facts about the run.
        elapsed = history.get('training_time', 0)
        workers = history.get('worker_count', 1)

        results = {
            'distributed_training_time': elapsed,
            'worker_count': workers,
            'data_shape': history.get('data_shape', (0, 0))
        }

        # Parallel efficiency — only when a baseline is supplied.
        if baseline_time and baseline_time > 0:
            speedup = baseline_time / elapsed
            efficiency = speedup / workers

            results['baseline_time'] = baseline_time
            results['speedup'] = speedup
            results['parallel_efficiency'] = efficiency
            results['time_reduction'] = (baseline_time - elapsed) / baseline_time

            print("   ⚡ 并行性能:")
            print(f"     基准时间: {baseline_time:.2f}秒")
            print(f"     分布式时间: {elapsed:.2f}秒")
            print(f"     加速比: {speedup:.2f}x")
            print(f"     并行效率: {efficiency:.2%}")

        # Resource configuration — when per-worker details are available.
        worker_details = cluster_state.get('worker_info')
        if worker_details:
            cores = sum(info.get('nthreads', 1) for info in worker_details.values())
            mem = sum(info.get('memory_limit', 0) for info in worker_details.values())

            results['total_cores'] = cores
            results['total_memory_gb'] = mem / (1024**3) if mem > 0 else 0
            results['cores_per_worker'] = cores / workers if workers > 0 else 0

            print("   🔧 资源配置:")
            print(f"     总核心数: {cores}")
            if mem > 0:
                print(f"     总内存: {mem / (1024**3):.1f}GB")
            else:
                print("     总内存: 未限制")
            print(f"     平均核心/节点: {cores / workers:.1f}")

        # Rough communication-overhead estimate from the data volume.
        samples, features = history.get('data_shape', (0, 0))
        if samples > 0 and features > 0:
            payload_mb = (samples * features * 8) / (1024**2)  # float64 assumption
            overhead = min(0.1, payload_mb / (elapsed * 10))  # empirical formula

            results['estimated_data_size_mb'] = payload_mb
            results['estimated_comm_overhead'] = overhead
            results['effective_compute_time'] = elapsed * (1 - overhead)

            print("   📡 通信分析:")
            print(f"     估算数据大小: {payload_mb:.1f}MB")
            print(f"     估算通信开销: {overhead:.1%}")

        self.analysis_results = results
        return results

    def generate_performance_report(self):
        """
        Render the analysis results as a formatted text report.

        Returns:
            str: multi-line report, or a placeholder when no analysis ran.
        """
        if not self.analysis_results:
            return "无性能分析数据"

        m = self.analysis_results
        lines = ["📊 分布式性能报告", "=" * 50, ""]

        # Basic run facts.
        lines += [
            "🔍 基本信息:",
            f"  训练时间: {m.get('distributed_training_time', 0):.2f}秒",
            f"  工作节点: {m.get('worker_count', 0)}个",
            f"  数据规模: {m.get('data_shape', (0, 0))}",
            ""
        ]

        # Parallel performance, if computed.
        if 'speedup' in m:
            lines += [
                "⚡ 并行性能:",
                f"  加速比: {m['speedup']:.2f}x",
                f"  并行效率: {m['parallel_efficiency']:.1%}",
                f"  时间缩减: {m['time_reduction']:.1%}",
                ""
            ]

        # Resource utilization, if computed.
        if 'total_cores' in m:
            lines += [
                "🔧 资源利用:",
                f"  总核心数: {m['total_cores']}",
                f"  总内存: {m.get('total_memory_gb', 0):.1f}GB",
                f"  核心/节点: {m.get('cores_per_worker', 0):.1f}",
                ""
            ]

        # Communication estimate, if computed.
        if 'estimated_comm_overhead' in m:
            lines += [
                "📡 通信分析:",
                f"  数据大小: {m['estimated_data_size_mb']:.1f}MB",
                f"  通信开销: {m['estimated_comm_overhead']:.1%}",
                f"  有效计算时间: {m['effective_compute_time']:.2f}秒",
                ""
            ]

        return "\n".join(lines)


class DistributedVisualizer:
    """
    Distributed-training visualizer.

    Creates charts for the distributed training process and results
    (cluster topology diagram, performance comparison panels) and saves
    them as PNG files under output_dir.
    """
    
    def __init__(self, output_dir="../results"):
        """
        Initialize the visualizer.

        Parameters:
            output_dir (str): directory where PNG charts are written
                (created if missing)
        """
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)
        
        # Chart styling: matplotlib default style plus a seaborn palette.
        plt.style.use('default')
        sns.set_palette("husl")
    
    def create_cluster_topology_chart(self, cluster_info):
        """
        Draw the cluster topology diagram (scheduler, workers, client).

        Saves to <output_dir>/cluster_topology.png and shows the figure.

        Parameters:
            cluster_info (dict): cluster state with optional keys
                'scheduler_info' and 'worker_info' (as produced by
                DistributedClusterManager.get_cluster_info())
        """
        fig, ax = plt.subplots(1, 1, figsize=(12, 8))
        fig.suptitle('Dask分布式集群拓扑结构', fontsize=16, fontweight='bold')
        
        # Simulated topology layout in unit coordinates.
        scheduler_info = cluster_info.get('scheduler_info', {})
        worker_info = cluster_info.get('worker_info', {})
        
        # Scheduler position (upper center).
        scheduler_x, scheduler_y = 0.5, 0.8
        
        # Draw the scheduler node.
        scheduler_circle = plt.Circle((scheduler_x, scheduler_y), 0.08, 
                                    color='red', alpha=0.7)
        ax.add_patch(scheduler_circle)
        ax.text(scheduler_x, scheduler_y, 'Scheduler\n调度器', 
               ha='center', va='center', fontweight='bold', fontsize=10)
        
        # Draw worker nodes evenly spaced on a circle around the scheduler.
        worker_count = len(worker_info)
        if worker_count > 0:
            for i, (worker_id, info) in enumerate(worker_info.items()):
                # Position on a circle of radius 0.3 centered at the scheduler.
                angle = 2 * np.pi * i / worker_count
                worker_x = scheduler_x + 0.3 * np.cos(angle)
                worker_y = scheduler_y + 0.3 * np.sin(angle)
                
                # Worker node marker.
                worker_circle = plt.Circle((worker_x, worker_y), 0.06, 
                                         color='blue', alpha=0.7)
                ax.add_patch(worker_circle)
                
                # Label: suffix of the dask worker id when present, else index.
                worker_name = worker_id.split('-')[-1] if '-' in worker_id else str(i)
                ax.text(worker_x, worker_y, f'Worker\n{worker_name}', 
                       ha='center', va='center', fontsize=8)
                
                # Dashed connection line scheduler -> worker.
                ax.plot([scheduler_x, worker_x], [scheduler_y, worker_y], 
                       'k--', alpha=0.5, linewidth=1)
        
        # Draw the client node (lower center).
        client_x, client_y = 0.5, 0.2
        client_circle = plt.Circle((client_x, client_y), 0.06, 
                                 color='green', alpha=0.7)
        ax.add_patch(client_circle)
        ax.text(client_x, client_y, 'Client\n客户端', 
               ha='center', va='center', fontweight='bold', fontsize=10)
        
        # Solid connection line client -> scheduler.
        ax.plot([client_x, scheduler_x], [client_y, scheduler_y], 
               'g-', alpha=0.7, linewidth=2)
        
        # Axes cosmetics: unit square, equal aspect, no frame.
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        ax.set_aspect('equal')
        ax.axis('off')
        
        # Legend for the three node types.
        legend_elements = [
            plt.Circle((0, 0), 0.02, color='red', alpha=0.7, label='调度器 (Scheduler)'),
            plt.Circle((0, 0), 0.02, color='blue', alpha=0.7, label='工作节点 (Worker)'),
            plt.Circle((0, 0), 0.02, color='green', alpha=0.7, label='客户端 (Client)')
        ]
        ax.legend(handles=legend_elements, loc='upper left')
        
        # Info box with scheduler address, worker count, and dashboard URL.
        info_text = f"""集群信息:
调度器地址: {scheduler_info.get('address', 'Unknown')}
工作节点数: {len(worker_info)}
仪表板: {scheduler_info.get('dashboard', 'N/A')}"""
        
        ax.text(0.02, 0.98, info_text, transform=ax.transAxes, 
               verticalalignment='top', fontsize=9,
               bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))
        
        plt.tight_layout()
        plt.savefig(os.path.join(self.output_dir, 'cluster_topology.png'), 
                   dpi=300, bbox_inches='tight')
        plt.show()
    
    def create_performance_comparison_chart(self, performance_metrics):
        """
        Draw a 2x2 panel of performance comparison charts.

        Saves to <output_dir>/performance_analysis.png and shows the figure.
        Skips (with a warning) when 'speedup' is missing from the metrics.

        Parameters:
            performance_metrics (dict): metrics produced by
                PerformanceAnalyzer.analyze_distributed_performance()
        """
        if not performance_metrics or 'speedup' not in performance_metrics:
            print("⚠️  缺少性能对比数据")
            return
        
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))
        fig.suptitle('分布式训练性能分析', fontsize=16, fontweight='bold')
        
        # 1. Training-time comparison (single machine vs distributed).
        times = [
            performance_metrics.get('baseline_time', 0),
            performance_metrics.get('distributed_training_time', 0)
        ]
        labels = ['单机训练', '分布式训练']
        colors = ['#ff7f0e', '#2ca02c']
        
        bars = axes[0, 0].bar(labels, times, color=colors)
        axes[0, 0].set_title('训练时间对比')
        axes[0, 0].set_ylabel('时间 (秒)')
        
        for i, v in enumerate(times):
            axes[0, 0].text(i, v + max(times)*0.01, f'{v:.1f}s', 
                           ha='center', va='bottom')
        
        # 2. Actual speedup vs ideal speedup, with the theoretical maximum line.
        speedup = performance_metrics.get('speedup', 1)
        efficiency = performance_metrics.get('parallel_efficiency', 1)
        worker_count = performance_metrics.get('worker_count', 1)
        
        metrics = [speedup, efficiency * worker_count]  # efficiency scaled back to a speedup
        metric_labels = ['实际加速比', '理想加速比']
        colors = ['#1f77b4', '#d62728']
        
        bars = axes[0, 1].bar(metric_labels, metrics, color=colors)
        axes[0, 1].set_title('并行效率分析')
        axes[0, 1].set_ylabel('加速比')
        axes[0, 1].axhline(y=worker_count, color='gray', linestyle='--', 
                          label=f'理论最大值({worker_count}x)')
        
        for i, v in enumerate(metrics):
            axes[0, 1].text(i, v + max(metrics)*0.02, f'{v:.2f}x', 
                           ha='center', va='bottom')
        
        axes[0, 1].legend()
        
        # 3. CPU-core utilization pie chart.
        if 'total_cores' in performance_metrics:
            total_cores = performance_metrics.get('total_cores', 1)
            used_cores = worker_count  # simplifying assumption: one busy core per worker
            
            sizes = [used_cores, total_cores - used_cores]
            labels = [f'使用核心({used_cores})', f'空闲核心({total_cores - used_cores})']
            colors = ['#ff9999', '#66b3ff']
            
            axes[1, 0].pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%',
                          startangle=90)
            axes[1, 0].set_title('CPU核心利用率')
        
        # 4. Compute time vs estimated communication overhead.
        if 'estimated_comm_overhead' in performance_metrics:
            comm_overhead = performance_metrics.get('estimated_comm_overhead', 0)
            compute_time = 1 - comm_overhead
            
            sizes = [compute_time, comm_overhead]
            labels = ['计算时间', '通信开销']
            colors = ['#90EE90', '#FFB6C1']
            
            axes[1, 1].pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%',
                          startangle=90)
            axes[1, 1].set_title('时间分配分析')
        
        plt.tight_layout()
        plt.savefig(os.path.join(self.output_dir, 'performance_analysis.png'), 
                   dpi=300, bbox_inches='tight')
        plt.show()


def main():
    """
    Main entry point: demonstrate manual Python parallel computing with
    Auto-sklearn on a hand-managed Dask cluster, end to end — data loading,
    cluster startup, distributed training, evaluation, performance analysis,
    and visualization.

    Returns:
        dict | None: result bundle (coordinator, cluster_manager, metrics,
        performance_metrics, model_info) on success; None on failure.
    """
    print("🎯 Auto-sklearn手动Python并行计算示例")
    print("=" * 60)
    
    # 1. Load the dataset and split into stratified train/test sets.
    print("\n📊 加载Wisconsin乳腺癌数据集")
    X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
        X, y, test_size=0.3, random_state=42, stratify=y
    )
    
    print(f"   数据集形状: {X.shape}")
    print(f"   训练集: {X_train.shape[0]}样本")
    print(f"   测试集: {X_test.shape[0]}样本")
    print(f"   特征数量: {X_train.shape[1]}个")
    print(f"   类别分布 - 训练集: 良性{np.sum(y_train==0)}, 恶性{np.sum(y_train==1)}")
    print(f"   类别分布 - 测试集: 良性{np.sum(y_test==0)}, 恶性{np.sum(y_test==1)}")
    
    # 2. Create and start the distributed cluster.
    cluster_manager = DistributedClusterManager(
        n_workers=2,                    # two worker processes
        worker_memory_limit='2GB',      # 2GB memory per worker
        threads_per_worker=1            # one thread per worker
    )
    
    try:
        # Bring up scheduler, workers, and client.
        cluster_manager.start_cluster()
        
        # Client handle Auto-sklearn uses for distributed execution.
        dask_client = cluster_manager.get_client()
        
        # Show the current cluster state.
        cluster_info = cluster_manager.get_cluster_info()
        print(f"\n📊 集群状态: {cluster_info['status']}")
        print(f"   活跃工作节点: {len(cluster_info.get('worker_info', {}))}")
        
        # 3. Create the distributed AutoML coordinator (unique tmp dir per run).
        tmp_folder = f"/tmp/distributed_automl_{int(time.time())}"
        coordinator = DistributedAutoMLCoordinator(dask_client, tmp_folder)
        
        # Configure the AutoML instance.
        time_budget = 45  # training time budget in seconds (demo-sized)
        automl = coordinator.create_distributed_automl(
            time_budget=time_budget,
            include_estimators=['extra_trees', 'random_forest', 'gradient_boosting']
        )
        
        # 4. Run distributed training.
        print(f"\n🚀 开始分布式训练 (预算: {time_budget}秒)")
        training_start = time.time()
        
        # Distributed model search.
        coordinator.distributed_fit(X_train, y_train, "breast_cancer_distributed")
        
        # Distributed ensemble construction.
        coordinator.distributed_ensemble_fit(y_train)
        
        training_end = time.time()
        total_training_time = training_end - training_start
        
        print(f"   ✅ 分布式训练完成")
        print(f"   总训练时间: {total_training_time:.2f}秒")
        
        # 5. Evaluate the fitted model on the held-out test set.
        print(f"\n📈 模型评估")
        
        # Predictions and class probabilities.
        predictions = automl.predict(X_test)
        probabilities = automl.predict_proba(X_test)
        
        # Standard classification metrics (weighted averages for P/R/F1).
        metrics = {
            'accuracy': sklearn.metrics.accuracy_score(y_test, predictions),
            'precision': sklearn.metrics.precision_score(y_test, predictions, average='weighted'),
            'recall': sklearn.metrics.recall_score(y_test, predictions, average='weighted'),
            'f1_score': sklearn.metrics.f1_score(y_test, predictions, average='weighted'),
            'auc': sklearn.metrics.roc_auc_score(y_test, probabilities[:, 1]),
            'confusion_matrix': confusion_matrix(y_test, predictions)
        }
        
        print(f"   准确率: {metrics['accuracy']:.4f}")
        print(f"   精确率: {metrics['precision']:.4f}")
        print(f"   召回率: {metrics['recall']:.4f}")
        print(f"   F1分数: {metrics['f1_score']:.4f}")
        print(f"   AUC: {metrics['auc']:.4f}")
        
        # 6. Summarize the trained ensemble.
        model_info = coordinator.get_distributed_models()
        print(f"\n🔧 分布式训练模型信息")
        print(f"   集成模型数量: {model_info['model_count']}")
        print(f"   算法多样性: {model_info['algorithm_diversity']}")
        
        if model_info['models']:
            print(f"   最佳模型:")
            # NOTE(review): assumes show_models() items unpack as
            # (score, model) pairs — verify against the installed
            # auto-sklearn version's return format.
            for rank, (score, model) in enumerate(list(model_info['models'].items())[:3]):
                print(f"     {rank+1}. {type(model).__name__} (分数: {score:.4f})")
        
        # 7. Performance analysis against an estimated single-machine baseline.
        analyzer = PerformanceAnalyzer()
        
        # Rough baseline estimate (assumes a single machine takes 1.5x longer).
        baseline_time = total_training_time * 1.5
        
        performance_metrics = analyzer.analyze_distributed_performance(
            coordinator, cluster_manager, baseline_time
        )
        
        # Text report of the analysis.
        performance_report = analyzer.generate_performance_report()
        print(f"\n{performance_report}")
        
        # 8. Visualizations (saved under ../results).
        print(f"\n📊 生成可视化分析")
        visualizer = DistributedVisualizer("../results")
        
        # Cluster topology diagram.
        visualizer.create_cluster_topology_chart(cluster_info)
        
        # Performance comparison charts.
        visualizer.create_performance_comparison_chart(performance_metrics)
        
        # 9. Auto-sklearn's own run statistics.
        print(f"\n📋 训练统计信息")
        training_stats = coordinator.get_training_statistics()
        print(training_stats)
        
        # 10. Detailed per-class classification report.
        print(f"\n📊 详细分类报告")
        print(classification_report(y_test, predictions, 
                                  target_names=['良性', '恶性']))
        
        print(f"\n📊 混淆矩阵")
        print(metrics['confusion_matrix'])
        
        # 11. Summary of distributed-training benefits.
        print(f"\n🎉 分布式训练优势总结")
        
        if 'speedup' in performance_metrics:
            speedup = performance_metrics['speedup']
            efficiency = performance_metrics['parallel_efficiency']
            
            print(f"   ⚡ 性能提升:")
            print(f"     训练加速: {speedup:.2f}倍")
            print(f"     并行效率: {efficiency:.1%}")
            print(f"     时间节约: {(1 - 1/speedup)*100:.1f}%")
        
        worker_count = len(cluster_info.get('worker_info', {}))
        print(f"   🔧 资源利用:")
        print(f"     并行工作节点: {worker_count}个")
        print(f"     内存扩展: {worker_count * 2}GB")
        print(f"     计算能力提升: {worker_count}倍")
        
        print(f"   📈 可扩展性:")
        print(f"     支持多机扩展: ✅")
        print(f"     动态节点管理: ✅")
        print(f"     容错能力: ✅")
        
        return {
            'coordinator': coordinator,
            'cluster_manager': cluster_manager,
            'metrics': metrics,
            'performance_metrics': performance_metrics,
            'model_info': model_info
        }
        
    except Exception as e:
        print(f"\n❌ 分布式训练失败: {str(e)}")
        import traceback
        traceback.print_exc()
        return None
        
    finally:
        # Always tear down the cluster, even after a failure.
        cluster_manager.cleanup()


if __name__ == "__main__":
    # Fix the RNG seed for reproducible data splits.
    np.random.seed(42)
    
    print(f"⚠️  重要提示: 手动Python并行需要在 `if __name__ == '__main__'` 保护下运行")
    print(f"   这是为了避免多进程创建时的问题")
    print()
    
    try:
        results = main()
        
        # Summarize the run: accuracy, ensemble size, and (if computed) speedup.
        if results:
            print(f"\n✅ 分布式训练示例执行成功!")
            print(f"   最终准确率: {results['metrics']['accuracy']:.4f}")
            print(f"   集成模型数: {results['model_info']['model_count']}")
            
            if 'speedup' in results['performance_metrics']:
                speedup = results['performance_metrics']['speedup']
                print(f"   性能提升: {speedup:.2f}倍")
        else:
            print(f"\n❌ 分布式训练示例执行失败")
        
    except KeyboardInterrupt:
        print(f"\n⚠️  程序被用户中断")
        
    except Exception as e:
        print(f"\n❌ 程序执行失败: {str(e)}")
        import traceback
        traceback.print_exc()
        
    finally:
        print(f"\n👋 感谢使用Auto-sklearn手动Python并行示例!")
        print(f"   📊 可视化结果保存在: ../results/")
        print(f"   🗂️  临时文件: /tmp/distributed_automl_*")