#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AutoML并行计算搜索策略示例
Parallel Computing Search Strategy Example

本示例演示如何使用Auto-sklearn的并行计算模式进行高效的模型训练和集成构建。
并行计算模式通过多核心处理显著提升AutoML的执行效率。

作者: AutoML学习团队
日期: 2024-07-12
"""

import os
import time
import warnings
import multiprocessing
from datetime import datetime
from pprint import pprint

import numpy as np
import psutil
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
from sklearn.metrics import classification_report, confusion_matrix

import autosklearn.classification
from autosklearn.ensembles.ensemble_selection import EnsembleSelection

# Suppress warning output (sklearn/auto-sklearn emit many deprecation warnings)
warnings.filterwarnings('ignore')


class ParallelExecutionController:
    """
    AutoML parallel execution controller.

    Drives Auto-sklearn's parallel execution workflow, using multiple CPU
    cores to speed up training:
    1. Automatically detects an optimal parallel configuration
    2. Monitors resource usage in real time
    3. Manages load balancing
    4. Analyzes parallel efficiency
    """

    def __init__(self, n_jobs=None, memory_limit=None, time_budget=120):
        """
        Initialize the parallel execution controller.

        Args:
            n_jobs (int | None): number of parallel workers; None = auto-detect
            memory_limit (int | None): memory limit in MB; None = auto-calculate
            time_budget (int): total time budget in seconds
        """
        self.time_budget = time_budget
        self.n_jobs = n_jobs or self._detect_optimal_n_jobs()
        self.memory_limit = memory_limit or self._calculate_memory_limit()
        # Unique per-run scratch directory for auto-sklearn's intermediate files.
        self.tmp_folder = f"/tmp/autosklearn_parallel_{int(time.time())}"

        self.start_time = None   # wall-clock start of fit(), set by fit_parallel
        self.end_time = None     # wall-clock end of fit(), set by fit_parallel
        self.automl = None       # AutoSklearnClassifier, set by create_parallel_automl
        self.resource_monitor = ResourceMonitor()

        print(f"🔧 初始化并行执行控制器")
        print(f"   CPU核心数: {multiprocessing.cpu_count()}")
        print(f"   并行进程数: {self.n_jobs}")
        print(f"   内存限制: {self.memory_limit}MB")
        print(f"   时间预算: {time_budget}秒")

    def _detect_optimal_n_jobs(self):
        """
        Auto-detect a reasonable number of parallel workers.

        Returns:
            int: recommended worker count (always >= 1)
        """
        cpu_count = multiprocessing.cpu_count()

        # Memory caps the worker count: budget ~1GB per worker.
        available_memory = psutil.virtual_memory().available
        memory_per_job = 1024 * 1024 * 1024  # 1GB per job
        max_jobs_by_memory = available_memory // memory_per_job

        # Keep one core for the OS; never use more than 8 workers.
        max_jobs_by_cpu = max(1, min(cpu_count - 1, 8))

        # BUGFIX: on a low-memory host max_jobs_by_memory can be 0, which
        # previously yielded n_jobs == 0 (invalid); clamp to at least 1.
        optimal_n_jobs = max(1, min(max_jobs_by_cpu, max_jobs_by_memory, 6))

        print(f"🧮 自动检测并行配置:")
        print(f"   CPU核心数: {cpu_count}")
        print(f"   可用内存: {available_memory / (1024**3):.1f}GB")
        print(f"   推荐并行数: {optimal_n_jobs}")

        return optimal_n_jobs

    def _calculate_memory_limit(self):
        """
        Derive a memory limit from currently available RAM.

        Returns:
            int: aggregate memory limit in MB across all workers
        """
        total_memory = psutil.virtual_memory().total
        available_memory = psutil.virtual_memory().available

        # Per-worker budget: available MB split across workers, capped at 2GB.
        memory_per_job = min(2048, available_memory // (self.n_jobs * 1024 * 1024))
        total_limit = memory_per_job * self.n_jobs

        print(f"💾 内存配置:")
        print(f"   总内存: {total_memory / (1024**3):.1f}GB")
        print(f"   可用内存: {available_memory / (1024**3):.1f}GB")
        print(f"   每进程分配: {memory_per_job}MB")
        print(f"   总限制: {total_limit}MB")

        # NOTE(review): auto-sklearn documents memory_limit as a PER-JOB
        # limit; returning the aggregate here may over-allocate when passed
        # to the classifier — confirm against the pinned auto-sklearn version.
        return total_limit

    def create_parallel_automl(self):
        """
        Build the Auto-sklearn classifier configured for parallel execution.

        Stores the (unfitted) classifier on ``self.automl``.
        """
        print(f"\n🏗️  创建Auto-sklearn分类器（并行模式）")

        self.automl = autosklearn.classification.AutoSklearnClassifier(
            time_left_for_this_task=self.time_budget,
            per_run_time_limit=int(self.time_budget / 8),  # cap per single run
            tmp_folder=self.tmp_folder,

            # Parallelism
            n_jobs=self.n_jobs,  # key setting: number of parallel workers
            memory_limit=self.memory_limit,  # memory limit (MB)

            # Ensemble construction
            ensemble_class=EnsembleSelection,  # enable ensemble building
            ensemble_kwargs={
                'ensemble_size': 25,  # number of models in the ensemble
                'sorted_initialization_n_best': 10  # best models used to seed init
            },

            # Search configuration
            initial_configurations_via_metalearning=25,  # meta-learned starting configs
            # NOTE(review): include_estimators/exclude_estimators were removed
            # in auto-sklearn >= 0.15 (replaced by include/exclude dicts) —
            # confirm against the pinned version before upgrading.
            include_estimators=None,  # include all estimators
            exclude_estimators=None,  # exclude none

            # SMAC configuration
            smac_scenario_args={
                'runcount_limit': 100,  # max number of target algorithm runs
                'wallclock_limit': self.time_budget,  # wall-clock limit
            },

            # Output control
            delete_tmp_folder_after_terminate=False,  # keep intermediate results
            logging_config=None,  # default logging configuration
            seed=42  # fixed seed for reproducibility
        )

        print(f"   ✅ 并行分类器创建完成")
        print(f"   🔄 并行进程数: {self.n_jobs}")
        print(f"   💾 内存限制: {self.memory_limit}MB")
        print(f"   ⏰ 时间预算: {self.time_budget}秒")

    def fit_parallel(self, X_train, y_train, dataset_name="breast_cancer"):
        """
        Train the model with parallel workers, monitoring resources throughout.

        Args:
            X_train: training feature matrix (n_samples, n_features)
            y_train: training labels (integer-encoded; np.bincount is applied)
            dataset_name: dataset name passed through to auto-sklearn

        Raises:
            Exception: re-raises any training failure after stopping the monitor.
        """
        print(f"\n🚀 开始并行训练")
        print(f"   数据集: {dataset_name}")
        print(f"   训练样本: {X_train.shape[0]}个")
        print(f"   特征数量: {X_train.shape[1]}个")
        print(f"   类别分布: {np.bincount(y_train)}")
        print(f"   并行进程: {self.n_jobs}个")

        # Start background resource sampling before the (blocking) fit call.
        self.resource_monitor.start_monitoring()
        self.start_time = time.time()

        try:
            # Blocking call; auto-sklearn distributes work across n_jobs workers.
            self.automl.fit(X_train, y_train, dataset_name=dataset_name)
            self.end_time = time.time()

            # Stop sampling once training has finished.
            self.resource_monitor.stop_monitoring()

            training_duration = self.end_time - self.start_time
            print(f"\n✅ 并行训练完成")
            print(f"   总耗时: {training_duration:.2f}秒")
            print(f"   训练的模型数量: {len(self.automl.show_models())}")

            # Show auto-sklearn's built-in run statistics.
            print(f"\n📊 训练统计信息:")
            print(self.automl.sprint_statistics())

        except Exception as e:
            # Always stop the monitor thread, even on failure, then re-raise.
            self.resource_monitor.stop_monitoring()
            print(f"❌ 并行训练失败: {str(e)}")
            raise

    def get_performance_summary(self):
        """
        Summarize training time and resource usage.

        Returns:
            dict: total_time, n_jobs, theoretical/actual speedup, parallel
                efficiency, CPU utilization, and memory statistics. Missing
                monitor statistics default to 0.
        """
        total_time = self.end_time - self.start_time if self.end_time else 0
        resource_stats = self.resource_monitor.get_statistics()

        return {
            'total_time': total_time,
            'n_jobs': self.n_jobs,
            'theoretical_speedup': self.n_jobs,
            'actual_speedup': resource_stats.get('actual_speedup', 0),
            'parallel_efficiency': resource_stats.get('parallel_efficiency', 0),
            'cpu_utilization': resource_stats.get('avg_cpu_percent', 0),
            'memory_peak': resource_stats.get('peak_memory_mb', 0),
            'memory_efficiency': resource_stats.get('memory_efficiency', 0)
        }


class ResourceMonitor:
    """
    System resource monitor.

    Samples CPU, memory, and process statistics from a background daemon
    thread while a parallel run is in progress, and summarizes them on demand.
    """

    def __init__(self, monitoring_interval=2):
        """
        Set up the monitor.

        Args:
            monitoring_interval (int): seconds between successive samples
        """
        self.monitoring_interval = monitoring_interval
        self.monitoring = False        # flag checked by the sampling loop
        self.metrics_history = []      # one snapshot dict per sample
        self.start_time = None         # set when monitoring begins

    def start_monitoring(self):
        """Begin sampling in a background daemon thread."""
        self.monitoring = True
        self.start_time = time.time()
        self.metrics_history = []
        print(f"📊 开始资源监控（间隔: {self.monitoring_interval}秒）")

        # Daemon thread so it never blocks interpreter shutdown.
        import threading
        self.monitoring_thread = threading.Thread(
            target=self._monitoring_loop, daemon=True
        )
        self.monitoring_thread.start()

    def stop_monitoring(self):
        """Signal the sampling thread to exit and wait briefly for it."""
        self.monitoring = False
        thread = getattr(self, 'monitoring_thread', None)
        if thread is not None:
            thread.join(timeout=5)
        print(f"⏹️  资源监控已停止")

    def _monitoring_loop(self):
        """Sampling loop: collect, record, report, sleep — until stopped."""
        while self.monitoring:
            try:
                sample = self._collect_metrics()
                self.metrics_history.append(sample)

                # Emit a one-line live status report.
                print(f"   📈 CPU: {sample['cpu_percent']:.1f}% | "
                      f"内存: {sample['memory_percent']:.1f}% | "
                      f"进程: {sample['process_count']}")

                time.sleep(self.monitoring_interval)
            except Exception as e:
                print(f"⚠️  监控错误: {e}")
                break

    def _collect_metrics(self):
        """Take one snapshot of system-wide CPU/memory/process metrics."""
        # Overall and per-core CPU utilization (each samples for 0.1s).
        overall_cpu = psutil.cpu_percent(interval=0.1)
        per_core_cpu = psutil.cpu_percent(interval=0.1, percpu=True)

        # System-wide virtual memory statistics.
        mem = psutil.virtual_memory()

        # Worker processes spawned (directly or indirectly) by this process.
        child_procs = psutil.Process().children(recursive=True)

        mb = 1024 * 1024
        return {
            'timestamp': time.time() - self.start_time,
            'cpu_percent': overall_cpu,
            'cpu_per_core': per_core_cpu,
            'memory_percent': mem.percent,
            'memory_used_mb': mem.used / mb,
            'memory_available_mb': mem.available / mb,
            'process_count': len(child_procs) + 1,
            # getloadavg is unavailable on some platforms (e.g. Windows).
            'load_average': os.getloadavg() if hasattr(os, 'getloadavg') else [0, 0, 0]
        }

    def get_statistics(self):
        """
        Summarize the collected samples.

        Returns:
            dict: averages, peaks, and a rough parallel-efficiency estimate;
                {} if no samples were recorded.
        """
        history = self.metrics_history
        if not history:
            return {}

        cpu_series = [s['cpu_percent'] for s in history]
        mem_series = [s['memory_used_mb'] for s in history]
        proc_series = [s['process_count'] for s in history]

        mean_cpu = np.mean(cpu_series)

        # Rough estimate: mean CPU load relative to a theoretical 100%
        # ceiling, capped at 1.0.
        efficiency = min(mean_cpu / 100, 1.0)

        return {
            'avg_cpu_percent': mean_cpu,
            'peak_cpu_percent': np.max(cpu_series),
            'avg_memory_mb': np.mean(mem_series),
            'peak_memory_mb': np.max(mem_series),
            'avg_process_count': np.mean(proc_series),
            'parallel_efficiency': efficiency,
            'monitoring_duration': history[-1]['timestamp']
        }


class ParallelPerformanceAnalyzer:
    """
    Parallel performance analyzer.

    Analyzes the efficiency and performance of a parallel AutoML run:
    predictive metrics, speedup / parallel efficiency, and the composition
    of the final model ensemble.
    """

    def __init__(self, automl_model, n_jobs):
        """
        Initialize the performance analyzer.

        Args:
            automl_model: a fitted AutoML model (must expose predict,
                predict_proba, and show_models)
            n_jobs: number of parallel workers used during training
        """
        self.automl = automl_model
        self.n_jobs = n_jobs

    def evaluate_predictions(self, X_test, y_test):
        """
        Evaluate predictive performance on a held-out test set.

        Args:
            X_test: test feature matrix
            y_test: test labels (binary: 0 = benign, 1 = malignant)

        Returns:
            dict: accuracy, precision, recall, f1_score, auc,
                confusion_matrix, predictions, probabilities
        """
        print(f"\n📈 模型性能评估")

        # Generate hard predictions and class probabilities.
        predictions = self.automl.predict(X_test)
        probabilities = self.automl.predict_proba(X_test)

        # Compute the standard classification metrics (weighted averages).
        accuracy = sklearn.metrics.accuracy_score(y_test, predictions)
        precision = sklearn.metrics.precision_score(y_test, predictions, average='weighted')
        recall = sklearn.metrics.recall_score(y_test, predictions, average='weighted')
        f1 = sklearn.metrics.f1_score(y_test, predictions, average='weighted')
        # AUC uses the positive-class column — assumes binary classification.
        auc = sklearn.metrics.roc_auc_score(y_test, probabilities[:, 1])

        # Confusion matrix (rows = actual, columns = predicted).
        cm = confusion_matrix(y_test, predictions)

        print(f"   准确率 (Accuracy): {accuracy:.4f}")
        print(f"   精确率 (Precision): {precision:.4f}")
        print(f"   召回率 (Recall): {recall:.4f}")
        print(f"   F1分数: {f1:.4f}")
        print(f"   AUC: {auc:.4f}")

        print(f"\n📊 分类报告:")
        print(classification_report(y_test, predictions,
                                  target_names=['良性 (Benign)', '恶性 (Malignant)']))

        print(f"\n🔍 混淆矩阵:")
        print("实际\\预测   良性   恶性")
        print(f"良性      {cm[0,0]:4d}   {cm[0,1]:4d}")
        print(f"恶性      {cm[1,0]:4d}   {cm[1,1]:4d}")

        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1_score': f1,
            'auc': auc,
            'confusion_matrix': cm,
            'predictions': predictions,
            'probabilities': probabilities
        }

    def analyze_parallel_efficiency(self, execution_time, baseline_time=None):
        """
        Analyze parallel execution efficiency.

        Args:
            execution_time: measured parallel execution time (seconds)
            baseline_time: sequential baseline time (seconds); if None, the
                sequential time is only estimated
        """
        print(f"\n⚡ 并行效率分析")
        print(f"   并行进程数: {self.n_jobs}")
        print(f"   实际执行时间: {execution_time:.2f}秒")

        if baseline_time:
            # Classic speedup / efficiency: S = T_seq / T_par, E = S / p.
            actual_speedup = baseline_time / execution_time
            efficiency = actual_speedup / self.n_jobs
            print(f"   基线时间: {baseline_time:.2f}秒")
            print(f"   实际加速比: {actual_speedup:.2f}x")
            print(f"   并行效率: {efficiency:.2f} ({efficiency*100:.1f}%)")
        else:
            # No baseline: estimate sequential time with a rough 0.7
            # efficiency factor (heuristic, not a measurement).
            estimated_sequential = execution_time * self.n_jobs * 0.7
            print(f"   估算顺序时间: {estimated_sequential:.2f}秒")
            print(f"   估算加速比: {estimated_sequential/execution_time:.2f}x")

    def analyze_model_ensemble(self):
        """
        Analyze the composition of the final model ensemble.

        Handles the auto-sklearn >= 0.14 ``show_models()`` format, where the
        return value maps model ids to description dicts containing
        ``ensemble_weight`` and ``sklearn_classifier``. (BUGFIX: the previous
        code unpacked ``models.items()`` as ``(weight, model)``, so it summed
        model ids as weights and reported every algorithm as ``dict``.)

        Returns:
            dict | None: model_count, algorithm_diversity, algorithm_weights;
                None when no models are available.
        """
        print(f"\n🏆 模型集成分析")

        models = self.automl.show_models()
        if not models:
            print("   ⚠️  没有可用的模型进行分析")
            return None

        # Aggregate ensemble weight per algorithm class name.
        algorithm_counts = {}
        total_weight = 0

        for model_id, model_info in models.items():
            if isinstance(model_info, dict):
                # New-style API: weight and fitted estimator live in the dict.
                weight = model_info.get('ensemble_weight', 0)
                estimator = model_info.get('sklearn_classifier', model_info)
            else:
                # Fallback for mappings of weight -> model (legacy intent).
                weight = model_id
                estimator = model_info
            algorithm = type(estimator).__name__
            algorithm_counts[algorithm] = algorithm_counts.get(algorithm, 0) + weight
            total_weight += weight

        print(f"   集成模型总数: {len(models)}")
        print(f"   算法类型数: {len(algorithm_counts)}")
        print(f"   权重分布:")

        for algorithm, weight in sorted(algorithm_counts.items(),
                                       key=lambda x: x[1], reverse=True):
            # Guard against a zero total (e.g. all weights missing).
            percentage = weight / total_weight * 100 if total_weight else 0
            print(f"     {algorithm}: {weight:.4f} ({percentage:.1f}%)")

        return {
            'model_count': len(models),
            'algorithm_diversity': len(algorithm_counts),
            'algorithm_weights': algorithm_counts
        }

    def save_analysis_results(self, performance_summary, evaluation_results,
                            ensemble_analysis, output_dir="../results"):
        """
        Save the analysis results to a timestamped text report.

        Args:
            performance_summary: dict from the controller's performance summary
            evaluation_results: dict from evaluate_predictions
            ensemble_analysis: dict from analyze_model_ensemble (may be None)
            output_dir: output directory (created if missing)
        """
        os.makedirs(output_dir, exist_ok=True)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Write the full analysis report.
        report_file = os.path.join(output_dir, f"parallel_analysis_{timestamp}.txt")
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("AutoML并行计算性能分析报告\n")
            f.write("=" * 60 + "\n\n")

            # Execution configuration
            f.write("执行配置:\n")
            f.write(f"并行进程数: {performance_summary['n_jobs']}\n")
            f.write(f"总执行时间: {performance_summary['total_time']:.2f}秒\n")
            f.write(f"理论加速比: {performance_summary['theoretical_speedup']:.2f}x\n\n")

            # Resource usage
            f.write("资源使用:\n")
            f.write(f"平均CPU使用率: {performance_summary['cpu_utilization']:.1f}%\n")
            f.write(f"内存峰值: {performance_summary['memory_peak']:.1f}MB\n")
            f.write(f"并行效率: {performance_summary['parallel_efficiency']:.3f}\n\n")

            # Predictive performance
            f.write("预测性能:\n")
            f.write(f"准确率: {evaluation_results['accuracy']:.4f}\n")
            f.write(f"精确率: {evaluation_results['precision']:.4f}\n")
            f.write(f"召回率: {evaluation_results['recall']:.4f}\n")
            f.write(f"F1分数: {evaluation_results['f1_score']:.4f}\n")
            f.write(f"AUC: {evaluation_results['auc']:.4f}\n\n")

            # Model ensemble (skipped when no ensemble analysis is available)
            if ensemble_analysis:
                f.write("模型集成:\n")
                f.write(f"集成模型数: {ensemble_analysis['model_count']}\n")
                f.write(f"算法多样性: {ensemble_analysis['algorithm_diversity']}\n")
                for algo, weight in ensemble_analysis['algorithm_weights'].items():
                    f.write(f"  {algo}: {weight:.4f}\n")

        print(f"\n💾 分析报告已保存到: {report_file}")


def main():
    """
    Main entry point: demonstrate the AutoML parallel-computing search strategy.

    Loads the Wisconsin breast-cancer dataset, trains an Auto-sklearn
    classifier in parallel, then analyzes predictive performance, parallel
    efficiency, and the resulting model ensemble, saving a report to disk.

    Returns:
        tuple: (controller, analyzer, performance_summary, evaluation_results)
    """
    print("🎯 AutoML并行计算搜索策略示例")
    print("=" * 60)

    # Check the host environment before starting.
    print(f"\n🖥️  系统环境检查")
    print(f"   CPU核心数: {multiprocessing.cpu_count()}")
    print(f"   可用内存: {psutil.virtual_memory().available / (1024**3):.1f}GB")
    print(f"   Python进程限制: {os.cpu_count()}")

    # 1. Load the data and make a stratified 70/30 train/test split.
    print("\n📊 加载Wisconsin乳腺癌数据集")
    X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
        X, y, test_size=0.3, random_state=42, stratify=y
    )

    print(f"   数据集形状: {X.shape}")
    print(f"   训练集: {X_train.shape[0]}样本")
    print(f"   测试集: {X_test.shape[0]}样本")
    print(f"   特征数量: {X_train.shape[1]}个")
    print(f"   类别分布 - 训练集: 良性{np.sum(y_train==0)}, 恶性{np.sum(y_train==1)}")
    print(f"   类别分布 - 测试集: 良性{np.sum(y_test==0)}, 恶性{np.sum(y_test==1)}")

    # 2. Create and configure the parallel execution controller.
    controller = ParallelExecutionController(
        n_jobs=None,  # auto-detect
        memory_limit=None,  # auto-calculate
        time_budget=120  # 2-minute time budget
    )

    # 3. Build the parallel AutoML classifier.
    controller.create_parallel_automl()

    # 4. Run parallel training.
    controller.fit_parallel(X_train, y_train, "breast_cancer")

    # 5. Performance analysis.
    analyzer = ParallelPerformanceAnalyzer(controller.automl, controller.n_jobs)

    # Summarize execution performance (time + resource usage).
    performance_summary = controller.get_performance_summary()

    print(f"\n⏱️  执行性能摘要:")
    print(f"   总执行时间: {performance_summary['total_time']:.2f}秒")
    print(f"   并行进程数: {performance_summary['n_jobs']}")
    print(f"   CPU利用率: {performance_summary['cpu_utilization']:.1f}%")
    print(f"   内存峰值: {performance_summary['memory_peak']:.1f}MB")
    print(f"   并行效率: {performance_summary['parallel_efficiency']:.3f}")

    # Evaluate predictive performance on the held-out test set.
    evaluation_results = analyzer.evaluate_predictions(X_test, y_test)

    # Analyze parallel efficiency (no sequential baseline available here).
    analyzer.analyze_parallel_efficiency(performance_summary['total_time'])

    # Analyze the composition of the model ensemble.
    ensemble_analysis = analyzer.analyze_model_ensemble()

    # Persist the analysis results to disk.
    analyzer.save_analysis_results(performance_summary, evaluation_results, ensemble_analysis)

    # 6. Final report.
    print(f"\n🎉 并行计算策略演示完成!")
    print(f"   最终准确率: {evaluation_results['accuracy']:.4f}")
    print(f"   总执行时间: {performance_summary['total_time']:.2f}秒")
    print(f"   并行效率: {performance_summary['parallel_efficiency']:.3f}")
    print(f"   集成模型数: {ensemble_analysis['model_count'] if ensemble_analysis else 0}")

    # 7. Cleanup reminder — tmp_folder is deliberately kept for inspection.
    print(f"\n🧹 资源清理:")
    print(f"   临时文件夹: {controller.tmp_folder}")
    print(f"   建议在完成分析后手动清理临时文件")

    return controller, analyzer, performance_summary, evaluation_results


if __name__ == "__main__":
    # Seed the NumPy RNG so results are reproducible.
    # (BUGFIX: removed a redundant nested `if __name__ == '__main__':` check
    # that duplicated the guard above and was always true here.)
    np.random.seed(42)

    try:
        controller, analyzer, performance_summary, evaluation_results = main()
        print(f"\n✅ 程序执行成功!")

    except KeyboardInterrupt:
        print(f"\n⚠️  程序被用户中断")

    except Exception as e:
        print(f"\n❌ 程序执行失败: {str(e)}")
        import traceback
        traceback.print_exc()

    finally:
        print(f"\n👋 感谢使用AutoML并行计算示例!")