#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Auto-sklearn CLI手动并行计算示例
Manual CLI Parallel Computing Example

本示例演示如何使用命令行工具手动管理Dask分布式集群，实现生产级的
Auto-sklearn分布式训练。通过CLI方式启动调度器和工作节点，提供企业级的
部署和运维能力。

作者: AutoML学习团队
日期: 2024-07-13
"""

import os
import time
import subprocess
import signal
import json
import warnings
from datetime import datetime
from pathlib import Path
from pprint import pprint

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
from sklearn.metrics import classification_report, confusion_matrix

import dask.distributed
from autosklearn.classification import AutoSklearnClassifier
from autosklearn.constants import MULTICLASS_CLASSIFICATION

# Suppress warning output (auto-sklearn and dask are chatty during search)
warnings.filterwarnings('ignore')

# Configure matplotlib fonts so Chinese labels render correctly,
# and keep the minus sign rendering when a CJK font is active
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False


class CLISchedulerManager:
    """
    CLI scheduler manager.

    Manages a Dask scheduler launched through the ``dask-scheduler``
    command line tool: process startup, readiness polling via the
    scheduler info file, status queries, and shutdown/cleanup.
    """

    def __init__(self, scheduler_file="scheduler-file.json",
                 port=8786, dashboard_port=8787, idle_timeout=300):
        """
        Initialize the CLI scheduler manager.

        Args:
            scheduler_file (str): Path where ``dask-scheduler`` writes its
                connection info once it is up.
            port (int): Scheduler TCP port.
            dashboard_port (int): Dashboard (web UI) port.
            idle_timeout (int): Scheduler idle timeout in seconds.
        """
        self.scheduler_file = scheduler_file
        self.port = port
        self.dashboard_port = dashboard_port
        self.idle_timeout = idle_timeout
        self.scheduler_process = None  # subprocess.Popen handle once started
        self.scheduler_info = {}       # parsed contents of scheduler_file

        # Remove any stale scheduler file from a previous run so the
        # readiness check below cannot pick up outdated information.
        if os.path.exists(scheduler_file):
            os.remove(scheduler_file)

        print(f"🎯 初始化CLI调度器管理器")
        print(f"   调度器文件: {scheduler_file}")
        print(f"   调度器端口: {port}")
        print(f"   仪表板端口: {dashboard_port}")
        print(f"   空闲超时: {idle_timeout}秒")

    def start_scheduler(self):
        """
        Start the scheduler via the ``dask-scheduler`` CLI.

        Returns:
            bool: True if the scheduler came up and wrote its info file;
            False otherwise (any partially started process is cleaned up).
        """
        print(f"\n🚀 启动CLI调度器")

        try:
            # Build the scheduler launch command.
            command = [
                "dask-scheduler",
                "--scheduler-file", self.scheduler_file,
                "--port", str(self.port),
                "--dashboard-address", f":{self.dashboard_port}",
                "--idle-timeout", str(self.idle_timeout),
                "--no-show"  # do not display the startup banner
            ]

            print(f"   执行命令: {' '.join(command)}")

            # Environment tuned for the scheduler process.
            env = self._get_scheduler_env()

            # Launch the scheduler in its own process group (POSIX-only via
            # os.setsid) so the whole group can be killed on timeout.
            self.scheduler_process = subprocess.Popen(
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
                preexec_fn=os.setsid
            )

            # Wait for the info file to appear, then cache its contents.
            if self._wait_for_scheduler_ready():
                self.scheduler_info = self._read_scheduler_info()
                print(f"   ✅ 调度器启动成功")
                print(f"   调度器地址: {self.scheduler_info.get('address', 'Unknown')}")
                print(f"   仪表板地址: http://localhost:{self.dashboard_port}")
                return True
            else:
                print(f"   ❌ 调度器启动失败")
                self._cleanup_scheduler()
                return False

        except Exception as e:
            print(f"   ❌ 启动调度器时发生错误: {str(e)}")
            self._cleanup_scheduler()
            return False

    def _get_scheduler_env(self):
        """
        Build the environment for the scheduler process.

        Returns:
            dict: copy of the current environment plus Dask scheduler
            configuration variables.
        """
        env = os.environ.copy()

        # Dask scheduler configuration via environment variables.
        env.update({
            'DASK_DISTRIBUTED__SCHEDULER__WORK_STEALING': 'True',
            'DASK_DISTRIBUTED__SCHEDULER__BANDWIDTH': '100MB/s',
            'DASK_DISTRIBUTED__SCHEDULER__ALLOWED_FAILURES': '3',
            'DASK_DISTRIBUTED__SCHEDULER__PICKLE_PROTOCOL': '4',
            'DASK_LOGGING__DISTRIBUTED': 'info'
        })

        return env

    def _wait_for_scheduler_ready(self, max_wait=30):
        """
        Poll until the scheduler has written its info file.

        Args:
            max_wait (int): Maximum wait time in seconds.

        Returns:
            bool: True when the info file exists and contains an address.
        """
        print(f"   等待调度器就绪...")

        for i in range(max_wait):
            # Bail out early if the scheduler process already exited.
            if self.scheduler_process.poll() is not None:
                print(f"   ❌ 调度器进程已退出")
                return False

            # Check whether the scheduler info file has been written.
            if os.path.exists(self.scheduler_file):
                try:
                    with open(self.scheduler_file, 'r') as f:
                        scheduler_data = json.load(f)
                        if 'address' in scheduler_data:
                            print(f"   ✅ 调度器文件创建成功 ({i+1}秒)")
                            return True
                except (OSError, ValueError):
                    # The file may be mid-write; ValueError covers
                    # json.JSONDecodeError for partial content. Retry.
                    pass

            time.sleep(1)

        print(f"   ⚠️  调度器启动超时")
        return False

    def _read_scheduler_info(self):
        """
        Read and parse the scheduler info file.

        Returns:
            dict: parsed scheduler info, or {} on any read/parse failure.
        """
        try:
            with open(self.scheduler_file, 'r') as f:
                return json.load(f)
        except Exception as e:
            print(f"   ⚠️  读取调度器信息失败: {str(e)}")
            return {}

    def get_scheduler_address(self):
        """
        Return the scheduler address.

        Returns:
            str: address from the cached scheduler info, '' if unknown.
        """
        return self.scheduler_info.get('address', '')

    def is_running(self):
        """
        Check whether the scheduler process is alive.

        Returns:
            bool: True if the process was started and has not exited.
        """
        if not self.scheduler_process:
            return False
        return self.scheduler_process.poll() is None

    def stop_scheduler(self):
        """Stop the scheduler and clean up its resources."""
        print(f"\n🛑 停止CLI调度器")
        self._cleanup_scheduler()

    def _cleanup_scheduler(self):
        """Terminate the scheduler process (if any) and remove its info file."""
        if self.scheduler_process:
            try:
                # Ask for a graceful shutdown first.
                self.scheduler_process.terminate()

                # Wait for the process to exit; escalate after a timeout.
                try:
                    self.scheduler_process.wait(timeout=10)
                    print(f"   ✅ 调度器进程已停止")
                except subprocess.TimeoutExpired:
                    # Kill the whole process group created by os.setsid.
                    os.killpg(os.getpgid(self.scheduler_process.pid), signal.SIGKILL)
                    print(f"   ⚠️  强制终止调度器进程")

            except Exception as e:
                print(f"   ⚠️  停止调度器时发生错误: {str(e)}")

        # Remove the scheduler info file so later runs start clean.
        if os.path.exists(self.scheduler_file):
            try:
                os.remove(self.scheduler_file)
                print(f"   ✅ 调度器文件已清理")
            except Exception as e:
                print(f"   ⚠️  清理调度器文件失败: {str(e)}")


class CLIWorkerManager:
    """
    CLI worker manager.

    Launches, tracks and shuts down a set of Dask workers through the
    ``dask-worker`` command line tool, keeping one process record per
    worker so their status can be inspected and they can be stopped later.
    """

    def __init__(self, scheduler_file, n_workers=2, nthreads=1,
                 memory_limit="2GB", lifetime="120s"):
        """
        Initialize the CLI worker manager.

        Args:
            scheduler_file (str): Scheduler info file to connect through.
            n_workers (int): Number of worker processes to launch.
            nthreads (int): Threads per worker.
            memory_limit (str): Per-worker memory limit (dask size string).
            lifetime (str): Worker lifetime (dask duration string).
        """
        self.scheduler_file = scheduler_file
        self.n_workers = n_workers
        self.nthreads = nthreads
        self.memory_limit = memory_limit
        self.lifetime = lifetime
        self.worker_processes = []  # one record dict per launched worker

        print(f"🎯 初始化CLI工作节点管理器")
        print(f"   工作节点数量: {n_workers}")
        print(f"   线程数/节点: {nthreads}")
        print(f"   内存限制: {memory_limit}")
        print(f"   生命周期: {lifetime}")

    def start_workers(self):
        """
        Start every worker process.

        Returns:
            bool: True only when all workers started successfully.
        """
        print(f"\n🚀 启动{self.n_workers}个CLI工作节点")

        started = 0
        for worker_id in range(self.n_workers):
            if not self._start_single_worker(worker_id):
                continue
            started += 1
            time.sleep(1)  # stagger startups slightly

        print(f"   ✅ 成功启动{started}/{self.n_workers}个工作节点")
        return started == self.n_workers

    def _start_single_worker(self, worker_id):
        """
        Launch a single ``dask-worker`` process.

        Args:
            worker_id (int): Identifier used for the worker's name.

        Returns:
            bool: True if the process is still alive after a short grace
            period, False if it failed to launch or exited immediately.
        """
        try:
            cmd = [
                "dask-worker",
                "--scheduler-file", self.scheduler_file,
                "--nthreads", str(self.nthreads),
                "--memory-limit", self.memory_limit,
                "--lifetime", self.lifetime,
                "--name", f"cli-worker-{worker_id}",
                "--death-timeout", "60s",
                "--no-nanny"  # run the worker directly, without a nanny process
            ]

            print(f"   启动Worker-{worker_id}: {' '.join(cmd[:4])}...")

            # Launch in its own process group so it can be killed as a group.
            proc = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=self._get_worker_env(),
                preexec_fn=os.setsid
            )

            record = {
                'id': worker_id,
                'process': proc,
                'command': ' '.join(cmd),
                'start_time': datetime.now(),
                'status': 'starting'
            }
            self.worker_processes.append(record)

            # Give the process a moment, then verify it did not die at once.
            time.sleep(2)
            if proc.poll() is not None:
                record['status'] = 'failed'
                print(f"     Worker-{worker_id} 启动失败")
                return False

            record['status'] = 'running'
            print(f"     Worker-{worker_id} 启动成功 (PID: {proc.pid})")
            return True

        except Exception as e:
            print(f"     Worker-{worker_id} 启动异常: {str(e)}")
            return False

    def _get_worker_env(self):
        """
        Build the environment for worker processes.

        Returns:
            dict: copy of the current environment plus Dask worker settings.
        """
        env = dict(os.environ)

        # Dask worker configuration via environment variables.
        env.update({
            'DASK_DISTRIBUTED__WORKER__DAEMON': 'False',
            'DASK_DISTRIBUTED__WORKER__MULTIPROCESSING_METHOD': 'spawn',
            'DASK_DISTRIBUTED__WORKER__USE_FILE_LOCKING': 'False',
            'DASK_DISTRIBUTED__WORKER__MEMORY_MONITOR_INTERVAL': '1s',
            'OMP_NUM_THREADS': str(self.nthreads),
            'DASK_LOGGING__DISTRIBUTED': 'info'
        })

        return env

    def get_worker_status(self):
        """
        Summarize the state of every tracked worker process.

        Returns:
            dict: totals plus one per-worker status entry.
        """
        entries = []
        running = 0

        for record in self.worker_processes:
            proc = record['process']
            alive = proc.poll() is None

            entries.append({
                'id': record['id'],
                'status': 'running' if alive else 'stopped',
                'pid': proc.pid if alive else None,
                'start_time': record['start_time'],
                'command': record['command']
            })

            if alive:
                running += 1

        return {
            'total_workers': len(self.worker_processes),
            'running_workers': running,
            'failed_workers': len(self.worker_processes) - running,
            'workers': entries
        }

    def stop_workers(self):
        """Stop every tracked worker process."""
        print(f"\n🛑 停止所有CLI工作节点")

        for record in self.worker_processes:
            self._stop_single_worker(record)

    def _stop_single_worker(self, worker_info):
        """Stop one worker, escalating to SIGKILL on the group if needed."""
        wid = worker_info['id']
        proc = worker_info['process']

        try:
            if proc.poll() is not None:
                # Already gone — nothing to do.
                print(f"   ℹ️  Worker-{wid} 已经停止")
                return

            # Ask for a graceful shutdown first.
            proc.terminate()
            try:
                proc.wait(timeout=10)
                print(f"   ✅ Worker-{wid} 已停止")
            except subprocess.TimeoutExpired:
                # Escalate: kill the whole process group.
                os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
                print(f"   ⚠️  强制终止Worker-{wid}")

        except Exception as e:
            print(f"   ⚠️  停止Worker-{wid}时发生错误: {str(e)}")


class ProductionClientManager:
    """
    Production-grade client manager.

    Manages a ``dask.distributed.Client`` connection against a
    CLI-deployed cluster: retried connection setup, connection
    verification, health checks, and clean shutdown.
    """

    def __init__(self, scheduler_file, timeout=30, heartbeat_interval=5000):
        """
        Initialize the production client manager.

        Args:
            scheduler_file (str): Scheduler info file written by dask-scheduler.
            timeout (int): Connection timeout in seconds.
            heartbeat_interval (int): Heartbeat interval in milliseconds.
        """
        self.scheduler_file = scheduler_file
        self.timeout = timeout
        self.heartbeat_interval = heartbeat_interval
        self.client = None         # dask.distributed.Client once connected
        self.connection_info = {}  # summary filled in by _verify_connection

        print(f"🎯 初始化生产级客户端管理器")
        print(f"   调度器文件: {scheduler_file}")
        print(f"   连接超时: {timeout}秒")
        print(f"   心跳间隔: {heartbeat_interval}毫秒")

    def connect_to_cluster(self, max_retry=3):
        """
        Connect to the distributed cluster, retrying on failure.

        Args:
            max_retry (int): Maximum number of connection attempts.

        Returns:
            bool: True once a verified connection is established.
        """
        print(f"\n🔗 连接到分布式集群")

        for attempt in range(max_retry):
            try:
                print(f"   尝试连接 (第{attempt+1}次)")

                # The scheduler file must exist before Client() can use it.
                if not self._wait_for_scheduler_file():
                    continue

                # Create the client connection from the scheduler file.
                self.client = dask.distributed.Client(
                    scheduler_file=self.scheduler_file,
                    timeout=f"{self.timeout}s",
                    heartbeat_interval=self.heartbeat_interval,
                    reconnect=True,
                    set_as_default=True
                )

                # Probe the scheduler to confirm the connection is usable.
                self.connection_info = self._verify_connection()

                if self.connection_info:
                    print(f"   ✅ 客户端连接成功")
                    self._print_connection_info()
                    return True
                else:
                    print(f"   ❌ 连接验证失败")
                    if self.client:
                        self.client.close()
                        self.client = None

            except Exception as e:
                print(f"   ❌ 连接失败: {str(e)}")
                if self.client:
                    try:
                        self.client.close()
                    except Exception:
                        # Best effort: a half-open client may fail to close.
                        pass
                    self.client = None

            if attempt < max_retry - 1:
                print(f"   等待5秒后重试...")
                time.sleep(5)

        print(f"   ❌ 客户端连接失败")
        return False

    def _wait_for_scheduler_file(self, max_wait=30):
        """
        Wait for the scheduler info file to be created.

        Args:
            max_wait (int): Maximum wait time in seconds.

        Returns:
            bool: True as soon as the file exists.
        """
        for _ in range(max_wait):
            if os.path.exists(self.scheduler_file):
                return True
            time.sleep(1)

        print(f"   ⚠️  调度器文件等待超时")
        return False

    def _verify_connection(self):
        """
        Verify the client connection by querying the scheduler.

        Returns:
            dict: connection summary (address, worker count, resources),
            or {} when the scheduler cannot be queried.
        """
        try:
            # Query the scheduler for its current state.
            scheduler_info = self.client.scheduler_info()

            # Per-worker entries keyed by worker address.
            workers = scheduler_info.get('workers', {})

            # Aggregate resources. memory_limit may be None for workers
            # started without a limit, so coerce falsy values to 0 before
            # summing.
            total_cores = sum(w.get('nthreads', 0) for w in workers.values())
            total_memory = sum(w.get('memory_limit') or 0 for w in workers.values())

            connection_info = {
                'scheduler_address': scheduler_info.get('address'),
                'worker_count': len(workers),
                'total_cores': total_cores,
                'total_memory_gb': total_memory / (1024**3) if total_memory > 0 else 0,
                'cluster_status': 'healthy' if len(workers) > 0 else 'no_workers'
            }

            return connection_info

        except Exception as e:
            print(f"   ⚠️  连接验证异常: {str(e)}")
            return {}

    def _print_connection_info(self):
        """Print the cached connection summary."""
        if self.connection_info:
            print(f"   📊 集群信息:")
            print(f"     调度器地址: {self.connection_info.get('scheduler_address')}")
            print(f"     工作节点: {self.connection_info.get('worker_count')}个")
            print(f"     总核心数: {self.connection_info.get('total_cores')}")
            print(f"     总内存: {self.connection_info.get('total_memory_gb', 0):.1f}GB")
            print(f"     集群状态: {self.connection_info.get('cluster_status')}")

    def get_client(self):
        """
        Return the connected Dask client.

        Returns:
            dask.distributed.Client: the client instance.

        Raises:
            RuntimeError: if connect_to_cluster() has not succeeded.
        """
        if not self.client:
            raise RuntimeError("客户端未连接")
        return self.client

    def check_cluster_health(self):
        """
        Check the cluster's health via the scheduler.

        Returns:
            dict: health summary; {'status': 'disconnected'} when no client
            is connected, {'status': 'error', ...} on query failure.
        """
        if not self.client:
            return {'status': 'disconnected'}

        try:
            scheduler_info = self.client.scheduler_info()
            workers = scheduler_info.get('workers', {})

            # Count workers the scheduler reports as running.
            # NOTE(review): assumes scheduler_info() worker entries expose a
            # 'status' string of 'running' — confirm against the dask version.
            healthy_workers = 0
            for worker_info in workers.values():
                if worker_info.get('status') == 'running':
                    healthy_workers += 1

            health_info = {
                'status': 'healthy' if healthy_workers > 0 else 'unhealthy',
                'total_workers': len(workers),
                'healthy_workers': healthy_workers,
                'health_ratio': healthy_workers / len(workers) if workers else 0,
                'scheduler_status': 'running'
            }

            return health_info

        except Exception as e:
            return {
                'status': 'error',
                'error': str(e)
            }

    def close_connection(self):
        """Close the client connection, clearing the handle in all cases."""
        if self.client:
            try:
                self.client.close()
                print(f"   ✅ 客户端连接已关闭")
            except Exception as e:
                print(f"   ⚠️  关闭客户端连接失败: {str(e)}")
            finally:
                self.client = None


class CLIDistributedTrainer:
    """
    CLI distributed trainer.

    Training manager for distributed AutoML on top of a CLI-deployed Dask
    cluster: builds the auto-sklearn classifier bound to the cluster's
    client, runs fit + ensemble building, and evaluates the result.
    """

    def __init__(self, client_manager, tmp_folder="/tmp/cli_distributed_automl"):
        """
        Initialize the CLI distributed trainer.

        Args:
            client_manager: connected client manager; must provide
                get_client(), check_cluster_health() and connection_info.
            tmp_folder (str): temporary folder for auto-sklearn artifacts.
        """
        self.client_manager = client_manager
        self.tmp_folder = tmp_folder
        self.automl = None           # set by create_automl_classifier()
        self.training_results = {}   # filled by train_model()/evaluate_model()

        # Create the temporary working directory up front
        os.makedirs(tmp_folder, exist_ok=True)

        print(f"🎯 初始化CLI分布式训练器")
        print(f"   临时目录: {tmp_folder}")

    def create_automl_classifier(self, time_budget=60, **kwargs):
        """
        Create the AutoML classifier bound to the Dask cluster.

        Args:
            time_budget (int): total search time budget in seconds.
            **kwargs: overrides merged on top of the default configuration.

        Returns:
            AutoSklearnClassifier: the configured (untrained) classifier.
        """
        print(f"\n🏗️  创建分布式AutoML分类器")

        # Get the Dask client (raises RuntimeError if not connected)
        dask_client = self.client_manager.get_client()

        # Default configuration
        default_config = {
            'time_left_for_this_task': time_budget,
            'per_run_time_limit': max(10, time_budget // 6),
            'memory_limit': 2048,
            'tmp_folder': self.tmp_folder,

            # Distributed configuration
            'dask_client': dask_client,
            'n_jobs': 1,  # superseded by dask_client

            # Algorithm configuration
            # NOTE(review): 'include_estimators'/'include_preprocessors' are
            # the legacy auto-sklearn parameter names (newer versions use a
            # single 'include' dict) — confirm against the installed version.
            'include_estimators': [
                'extra_trees',
                'random_forest',
                'gradient_boosting',
                'sgd',
                'passive_aggressive'
            ],
            'include_preprocessors': ['no_preprocessing'],

            # Validation configuration
            'resampling_strategy': 'holdout',
            'resampling_strategy_arguments': {'train_size': 0.67},

            # Misc configuration
            'delete_tmp_folder_after_terminate': False,
            'disable_evaluator_output': False,
            'seed': 42
        }

        # Merge caller overrides on top of the defaults
        config = {**default_config, **kwargs}

        # Create the classifier
        self.automl = AutoSklearnClassifier(**config)

        print(f"   ✅ AutoML分类器创建完成")
        print(f"   时间预算: {time_budget}秒")
        print(f"   使用工作节点: {self.client_manager.connection_info.get('worker_count', 0)}个")

        return self.automl

    def train_model(self, X_train, y_train, dataset_name="cli_distributed"):
        """
        Train the distributed model (fit + ensemble building).

        Args:
            X_train: training features (2D array-like with .shape).
            y_train: training labels.
            dataset_name (str): dataset name passed through to auto-sklearn.

        Returns:
            bool: True when training completed, False on any exception.

        Raises:
            ValueError: if create_automl_classifier() was not called first.
        """
        if not self.automl:
            raise ValueError("请先创建AutoML分类器")

        print(f"\n🚀 开始CLI分布式训练")
        print(f"   数据集: {dataset_name}")
        print(f"   训练样本: {X_train.shape[0]}")
        print(f"   特征数量: {X_train.shape[1]}")

        # Report cluster health before spending the time budget
        health_info = self.client_manager.check_cluster_health()
        print(f"   集群状态: {health_info.get('status')}")
        print(f"   健康工作节点: {health_info.get('healthy_workers', 0)}/{health_info.get('total_workers', 0)}")

        # Record training start time
        training_start = time.time()

        try:
            # Run the model search and training
            print(f"   开始模型搜索和训练...")
            self.automl.fit(X_train, y_train, dataset_name=dataset_name)

            # Build the ensemble from the trained models.
            # NOTE(review): passing both ensemble_kwargs={'ensemble_size': …}
            # and ensemble_nbest assumes a newer fit_ensemble signature —
            # confirm against the installed auto-sklearn version.
            print(f"   开始集成模型构建...")
            self.automl.fit_ensemble(
                y_train,
                task=MULTICLASS_CLASSIFICATION,
                dataset_name=dataset_name,
                ensemble_kwargs={'ensemble_size': 20},
                ensemble_nbest=50
            )

            training_end = time.time()
            training_time = training_end - training_start

            # Record the training results for later reporting
            self.training_results = {
                'dataset_name': dataset_name,
                'training_time': training_time,
                'data_shape': X_train.shape,
                'start_time': datetime.fromtimestamp(training_start),
                'end_time': datetime.fromtimestamp(training_end),
                'cluster_info': self.client_manager.connection_info.copy()
            }

            print(f"   ✅ CLI分布式训练完成")
            print(f"   总训练时间: {training_time:.2f}秒")

            return True

        except Exception as e:
            print(f"   ❌ CLI分布式训练失败: {str(e)}")
            return False

    def evaluate_model(self, X_test, y_test):
        """
        Evaluate the trained model on held-out data.

        Args:
            X_test: test features.
            y_test: test labels.

        Returns:
            dict: metrics, model info and auto-sklearn run statistics.

        Raises:
            ValueError: if the model has not been trained.
        """
        if not self.automl:
            raise ValueError("模型未训练")

        print(f"\n📈 评估模型性能")

        # Predict labels and class probabilities
        predictions = self.automl.predict(X_test)
        probabilities = self.automl.predict_proba(X_test)

        # Compute metrics.
        # NOTE(review): probabilities[:, 1] assumes a BINARY problem — the
        # AUC line will not work as-is for multiclass targets.
        metrics = {
            'accuracy': sklearn.metrics.accuracy_score(y_test, predictions),
            'precision': sklearn.metrics.precision_score(y_test, predictions, average='weighted'),
            'recall': sklearn.metrics.recall_score(y_test, predictions, average='weighted'),
            'f1_score': sklearn.metrics.f1_score(y_test, predictions, average='weighted'),
            'auc': sklearn.metrics.roc_auc_score(y_test, probabilities[:, 1]),
            'confusion_matrix': confusion_matrix(y_test, predictions)
        }

        # Model information.
        # NOTE(review): show_models() return type differs across auto-sklearn
        # versions; the diversity expression below assumes a dict-like value
        # and may count wrapper types rather than algorithms — verify.
        models = self.automl.show_models()
        model_info = {
            'model_count': len(models),
            'models': dict(models) if models else {},
            'algorithm_diversity': len(set(type(model).__name__ for _, model in models.items())) if models else 0
        }

        evaluation_results = {
            'metrics': metrics,
            'model_info': model_info,
            'statistics': self.automl.sprint_statistics()
        }

        # Merge evaluation results into the accumulated training results
        self.training_results.update(evaluation_results)

        print(f"   准确率: {metrics['accuracy']:.4f}")
        print(f"   精确率: {metrics['precision']:.4f}")
        print(f"   召回率: {metrics['recall']:.4f}")
        print(f"   F1分数: {metrics['f1_score']:.4f}")
        print(f"   AUC: {metrics['auc']:.4f}")
        print(f"   集成模型数: {model_info['model_count']}")

        return evaluation_results


def main():
    """
    Main entry point: demonstrate CLI-managed parallel Auto-sklearn.

    Starts a Dask scheduler and workers via their CLI tools, connects a
    client, trains and evaluates a distributed AutoML model, and always
    tears the cluster down in the finally block.

    Returns:
        dict | None: training/evaluation/performance summary on success,
        None on failure or user interrupt.
    """
    print("🎯 Auto-sklearn CLI手动并行计算示例")
    print("=" * 60)

    # CLI component managers — kept at None so the finally block only
    # cleans up what was actually created
    scheduler_manager = None
    worker_manager = None
    client_manager = None
    trainer = None

    try:
        # 1. Data preparation
        print("\n📊 准备乳腺癌数据集")
        X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
        X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
            X, y, test_size=0.3, random_state=42, stratify=y
        )

        print(f"   数据集形状: {X.shape}")
        print(f"   训练集: {X_train.shape[0]}样本")
        print(f"   测试集: {X_test.shape[0]}样本")
        print(f"   特征数量: {X_train.shape[1]}个")

        # 2. Start the CLI scheduler
        scheduler_manager = CLISchedulerManager(
            scheduler_file="scheduler-file.json",
            port=8786,
            dashboard_port=8787,
            idle_timeout=300
        )

        if not scheduler_manager.start_scheduler():
            raise RuntimeError("调度器启动失败")

        # 3. Start the CLI worker processes
        worker_manager = CLIWorkerManager(
            scheduler_file="scheduler-file.json",
            n_workers=2,
            nthreads=1,
            memory_limit="2GB",
            lifetime="180s"  # 3-minute worker lifetime
        )

        if not worker_manager.start_workers():
            print("⚠️  部分工作节点启动失败，继续执行...")

        # Give the workers time to register with the scheduler
        time.sleep(5)

        # 4. Create the production-grade client
        client_manager = ProductionClientManager(
            scheduler_file="scheduler-file.json",
            timeout=30,
            heartbeat_interval=5000
        )

        if not client_manager.connect_to_cluster():
            raise RuntimeError("客户端连接失败")

        # 5. Create the distributed trainer
        trainer = CLIDistributedTrainer(
            client_manager=client_manager,
            tmp_folder=f"/tmp/cli_automl_{int(time.time())}"
        )

        # 6. Create and train the model
        time_budget = 50  # 50-second training budget
        automl = trainer.create_automl_classifier(time_budget=time_budget)

        print(f"\n🚀 开始分布式训练")
        training_success = trainer.train_model(X_train, y_train, "breast_cancer_cli")

        if not training_success:
            raise RuntimeError("分布式训练失败")

        # 7. Model evaluation
        evaluation_results = trainer.evaluate_model(X_test, y_test)

        # 8. Performance analysis
        print(f"\n📊 CLI分布式性能分析")
        training_results = trainer.training_results

        # Basic performance figures
        training_time = training_results.get('training_time', 0)
        worker_count = training_results.get('cluster_info', {}).get('worker_count', 1)

        print(f"   训练时间: {training_time:.2f}秒")
        print(f"   工作节点: {worker_count}个")
        print(f"   集成模型: {evaluation_results['model_info']['model_count']}个")
        print(f"   最终准确率: {evaluation_results['metrics']['accuracy']:.4f}")

        # Estimated speedup. NOTE: the single-machine time is an ASSUMED
        # fixed 1.8x of the measured time, not a measurement — so the
        # reported speedup is always 1.8x by construction.
        estimated_sequential_time = training_time * 1.8  # assumed single-machine time
        speedup = estimated_sequential_time / training_time
        efficiency = speedup / worker_count

        print(f"\n⚡ 性能提升分析:")
        print(f"   估算单机时间: {estimated_sequential_time:.2f}秒")
        print(f"   实际分布式时间: {training_time:.2f}秒")
        print(f"   加速比: {speedup:.2f}x")
        print(f"   并行效率: {efficiency:.1%}")

        # 9. Worker status check
        worker_status = worker_manager.get_worker_status()
        print(f"\n🔧 工作节点状态:")
        print(f"   总节点: {worker_status['total_workers']}")
        print(f"   运行中: {worker_status['running_workers']}")
        print(f"   已停止: {worker_status['failed_workers']}")

        # 10. Cluster health check
        health_info = client_manager.check_cluster_health()
        print(f"\n💚 集群健康状态:")
        print(f"   状态: {health_info.get('status')}")
        print(f"   健康节点: {health_info.get('healthy_workers', 0)}/{health_info.get('total_workers', 0)}")
        print(f"   健康比例: {health_info.get('health_ratio', 0):.1%}")

        # 11. Detailed classification report
        print(f"\n📊 详细分类报告")
        print(classification_report(y_test, trainer.automl.predict(X_test),
                                  target_names=['良性', '恶性']))

        # 12. Training statistics
        print(f"\n📋 训练统计信息")
        print(evaluation_results['statistics'])

        # 13. Summary of CLI deployment advantages
        print(f"\n🎉 CLI分布式部署优势")
        print(f"   🔧 运维友好: 标准CLI工具，易于集成监控系统")
        print(f"   📊 可观测性: 完整的日志和指标采集")
        print(f"   🚀 可扩展性: 支持多机房和云原生部署")
        print(f"   🛡️  生产就绪: 适合企业级生产环境")
        print(f"   ⚡ 性能提升: {speedup:.1f}倍训练加速")

        return {
            'training_results': training_results,
            'evaluation_results': evaluation_results,
            'performance_metrics': {
                'speedup': speedup,
                'efficiency': efficiency,
                'training_time': training_time
            }
        }

    except KeyboardInterrupt:
        print(f"\n⚠️  用户中断程序")
        return None

    except Exception as e:
        print(f"\n❌ 程序执行失败: {str(e)}")
        import traceback
        traceback.print_exc()
        return None

    finally:
        # Resource cleanup — always runs, in reverse order of creation
        print(f"\n🧹 清理CLI分布式资源")

        # Close the client connection
        if client_manager:
            client_manager.close_connection()

        # Stop the worker processes
        if worker_manager:
            worker_manager.stop_workers()

        # Stop the scheduler
        if scheduler_manager:
            scheduler_manager.stop_scheduler()

        print(f"   ✅ 资源清理完成")


if __name__ == "__main__":
    # Usage notice: this demo launches real processes via the dask CLI tools,
    # which must be installed and on PATH.
    print(f"⚠️  重要提示: CLI并行部署适合生产环境使用")
    print(f"   需要确保系统已安装dask命令行工具")
    print(f"   建议在专用的分布式环境中运行")
    print()

    try:
        results = main()

        # main() returns a result dict on success, None on failure/interrupt
        if results:
            print(f"\n✅ CLI分布式训练示例执行成功!")
            print(f"   最终准确率: {results['evaluation_results']['metrics']['accuracy']:.4f}")
            print(f"   训练加速: {results['performance_metrics']['speedup']:.1f}倍")
            print(f"   并行效率: {results['performance_metrics']['efficiency']:.1%}")
        else:
            print(f"\n❌ CLI分布式训练示例执行失败")

    except KeyboardInterrupt:
        print(f"\n⚠️  程序被用户中断")

    except Exception as e:
        print(f"\n❌ 程序执行失败: {str(e)}")

    finally:
        # Pointers to the artifacts a run leaves behind
        print(f"\n👋 感谢使用Auto-sklearn CLI分布式示例!")
        print(f"   📊 监控仪表板: http://localhost:8787")
        print(f"   🗂️  临时文件: /tmp/cli_automl_*")
        print(f"   📋 调度器文件: scheduler-file.json")