#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
乳腺癌诊断二分类 - 主要实现脚本
基于Auto-sklearn的完整机器学习生命周期实现

作者: AutoML学习指南
日期: 2024年7月
"""

import time
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path

import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split

import autosklearn.classification
import autosklearn.metrics

# Set up the results directory tree (created eagerly at import time so every
# later step can assume the folders exist).
RESULTS_DIR = Path("../results")
# parents=True also creates missing ancestors instead of raising
# FileNotFoundError when the parent of "../results" does not exist yet.
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
(RESULTS_DIR / "visualizations").mkdir(exist_ok=True)
(RESULTS_DIR / "model_artifacts").mkdir(exist_ok=True)


def load_and_prepare_data():
    """Load the Wisconsin breast-cancer dataset and split it for modelling.

    Returns:
        dict: train/test arrays plus metadata (feature names, target names,
        a pandas DataFrame view of the data and the dataset description).
    """
    print("🔄 正在加载威斯康星乳腺癌数据集...")

    dataset = sklearn.datasets.load_breast_cancer()
    features, labels = dataset.data, dataset.target
    names_of_features = dataset.feature_names
    names_of_targets = dataset.target_names

    # DataFrame view kept for exploratory analysis downstream.
    frame = pd.DataFrame(features, columns=names_of_features)
    frame['target'] = labels

    print(f"数据集信息:")
    print(f"  - 样本数量: {features.shape[0]}")
    print(f"  - 特征数量: {features.shape[1]}")
    print(f"  - 类别分布: {dict(zip(names_of_targets, np.bincount(labels)))}")

    # Stratified 80/20 split keeps the class ratio identical in both parts.
    split = train_test_split(
        features, labels, test_size=0.2, random_state=42, stratify=labels
    )
    features_train, features_test, labels_train, labels_test = split

    print(f"训练集大小: {features_train.shape}")
    print(f"测试集大小: {features_test.shape}")
    print(f"训练集类别分布: {np.bincount(labels_train)}")

    return {
        'X_train': features_train,
        'X_test': features_test,
        'y_train': labels_train,
        'y_test': labels_test,
        'feature_names': names_of_features,
        'target_names': names_of_targets,
        'dataframe': frame,
        'dataset_description': dataset.DESCR,
    }


def create_medical_scorer():
    """Build a custom Auto-sklearn scorer tuned for the medical use case.

    In sklearn's breast-cancer dataset the encoding is 0 = malignant and
    1 = benign.  The original implementation unravelled the confusion
    matrix with the default label order [0, 1], which silently made
    *benign* the positive class, so "sensitivity" actually measured how
    well benign cases were recognised.  Passing labels=[1, 0] makes
    malignant (label 0) the positive class, matching the clinical intent
    stated in the comments and the 60% sensitivity weight.

    Returns:
        autosklearn.metrics.Scorer: weighted scorer favouring sensitivity.
    """
    def medical_score(solution, prediction):
        """Weighted score = 0.6*sensitivity + 0.3*specificity + 0.1*precision,
        with malignant (label 0) treated as the positive class."""
        from sklearn.metrics import confusion_matrix

        # labels=[1, 0] -> "negative" = benign (1), "positive" = malignant (0),
        # so tp counts detected cancers and fn counts missed cancers.
        tn, fp, fn, tp = confusion_matrix(
            solution, prediction, labels=[1, 0]
        ).ravel()

        sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0  # true positive rate
        specificity = tn / (tn + fp) if (tn + fp) > 0 else 0  # true negative rate
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0    # positive predictive value

        # Missing a cancer (false negative) is far worse than a false alarm,
        # hence sensitivity carries the largest weight.
        return (0.6 * sensitivity +
                0.3 * specificity +
                0.1 * precision)

    # Wrap the plain function into an Auto-sklearn Scorer object.
    medical_scorer = autosklearn.metrics.make_scorer(
        name='medical_weighted_score',
        score_func=medical_score,
        optimum=1,
        greater_is_better=True,
        needs_proba=False,
        needs_threshold=False,
    )

    print("✅ 创建医疗优化评分器: 敏感性(60%) + 特异性(30%) + 精确率(10%)")
    return medical_scorer


def create_automl_classifier():
    """Build an AutoSklearnClassifier configured for this diagnosis task.

    Returns:
        autosklearn.classification.AutoSklearnClassifier: ready-to-fit model.
    """
    print("⚙️ 正在配置Auto-sklearn分类器...")

    # Custom metric that prioritises sensitivity over other measures.
    scorer = create_medical_scorer()

    # All tuning knobs gathered in one place before instantiation.
    settings = dict(
        time_left_for_this_task=300,   # total budget: 5 minutes (adjustable)
        per_run_time_limit=30,         # 30 s cap per candidate model
        n_jobs=4,                      # parallel workers
        memory_limit=3072,             # memory cap (3 GB)
        metric=scorer,                 # custom medical metric
        ensemble_kwargs={
            'ensemble_size': 15,       # size of the final ensemble
            'task_type': 1,            # classification task id
        },
        resampling_strategy='cv',      # cross-validation
        resampling_strategy_arguments={'folds': 5},
        tmp_folder="/tmp/breast_cancer_automl",
        delete_tmp_folder_after_terminate=False,  # keep temp files for inspection
    )
    automl = autosklearn.classification.AutoSklearnClassifier(**settings)

    print("配置完成:")
    print(f"  - 时间预算: 5分钟")
    print(f"  - 并行作业: 4个")
    print(f"  - 评估策略: 5折交叉验证")
    print(f"  - 评估指标: 自定义医疗评分器")

    return automl


def train_model(automl, X_train, y_train):
    """Fit the Auto-sklearn model and summarise the run.

    Args:
        automl: an unfitted Auto-sklearn classifier; only ``fit`` and
            ``leaderboard`` are actually required of it.
        X_train: training feature matrix.
        y_train: training labels.

    Returns:
        dict: always contains 'training_time'; when the leaderboard is
        available it also contains 'models_evaluated', 'best_score' (cost
        of the top-ranked model, or None if nothing was evaluated) and
        'ensemble_size' (number of models with a positive ensemble weight).
    """
    print("🚀 开始模型训练...")
    print("⏱️  预计需要5分钟，请耐心等待...")

    start_time = time.time()
    automl.fit(
        X_train, y_train,
        dataset_name="wisconsin_breast_cancer"
    )
    training_time = time.time() - start_time

    print(f"✅ 训练完成! 用时: {training_time:.2f}秒")

    try:
        leaderboard = automl.leaderboard()

        best_score = leaderboard.iloc[0]['cost'] if len(leaderboard) > 0 else None
        training_results = {
            'training_time': training_time,
            'models_evaluated': len(leaderboard),
            'best_score': best_score,
            'ensemble_size': int((leaderboard['ensemble_weight'] > 0).sum()),
        }

        print(f"训练统计:")
        print(f"  - 评估模型数: {training_results['models_evaluated']}")
        print(f"  - 集成大小: {training_results['ensemble_size']}")
        # Guard against an empty leaderboard: formatting None with :.4f
        # raised TypeError in the original, and the broad except below then
        # discarded all collected statistics.
        if best_score is not None:
            print(f"  - 最佳评分: {best_score:.4f}")

    except Exception as e:
        # Leaderboard access can fail depending on the auto-sklearn version;
        # fall back to the minimal result instead of aborting the pipeline.
        print(f"⚠️ 无法获取详细统计信息: {e}")
        training_results = {'training_time': training_time}

    return training_results


def evaluate_model(automl, X_test, y_test, target_names):
    """Evaluate the fitted model on the held-out test set.

    Label-encoding note: in sklearn's breast-cancer data 0 = malignant and
    1 = benign.  The original code unravelled the confusion matrix in the
    default [0, 1] order, which made *benign* the positive class, so every
    "medical" metric (sensitivity, specificity, PPV/NPV, the missed-cancer
    count and the clinical risk score) was computed for the wrong class.
    Passing labels=[1, 0] makes malignant the positive class as intended.

    Args:
        automl: fitted classifier exposing predict / predict_proba.
        X_test: test feature matrix.
        y_test: test labels (0 = malignant, 1 = benign).
        target_names: class names (unused here; kept for interface parity).

    Returns:
        dict: basic metrics, medical metrics, confusion analysis,
        clinical risk assessment and the raw predictions.
    """
    print("📊 正在进行模型评估...")

    # Hard predictions and class-membership probabilities.
    y_pred = automl.predict(X_test)
    y_proba = automl.predict_proba(X_test)

    from sklearn.metrics import (
        accuracy_score, precision_score, recall_score, f1_score,
        roc_auc_score, confusion_matrix
    )

    # Standard classification metrics (weighted averages over both classes).
    basic_metrics = {
        'accuracy': accuracy_score(y_test, y_pred),
        'precision': precision_score(y_test, y_pred, average='weighted'),
        'recall': recall_score(y_test, y_pred, average='weighted'),
        'f1_score': f1_score(y_test, y_pred, average='weighted'),
        # AUC is computed with class 1 (benign) as positive; the value is
        # equivalent to the malignant-positive AUC for a binary problem.
        'auc_roc': roc_auc_score(y_test, y_proba[:, 1]),
    }

    # labels=[1, 0]: "negative" = benign (1), "positive" = malignant (0),
    # so tp = correctly detected cancers and fn = missed cancers.
    tn, fp, fn, tp = confusion_matrix(y_test, y_pred, labels=[1, 0]).ravel()

    # Clinically-oriented metrics, malignant treated as positive.
    medical_metrics = {
        'sensitivity': tp / (tp + fn) if (tp + fn) > 0 else 0,  # detects malignant
        'specificity': tn / (tn + fp) if (tn + fp) > 0 else 0,  # detects benign
        'ppv': tp / (tp + fp) if (tp + fp) > 0 else 0,          # positive predictive value
        'npv': tn / (tn + fn) if (tn + fn) > 0 else 0,          # negative predictive value
        'false_positive_rate': fp / (fp + tn) if (fp + tn) > 0 else 0,
        'false_negative_rate': fn / (fn + tp) if (fn + tp) > 0 else 0,
    }

    # Raw confusion-matrix counts for the report.
    confusion_analysis = {
        'true_negative': int(tn),   # benign correctly identified
        'false_positive': int(fp),  # benign misdiagnosed as malignant
        'false_negative': int(fn),  # malignant missed (most dangerous!)
        'true_positive': int(tp),   # malignant correctly identified
        'total_samples': len(y_test)
    }

    print("📈 模型性能评估结果:")
    print(f"准确率: {basic_metrics['accuracy']:.4f}")
    print(f"精确率: {basic_metrics['precision']:.4f}")
    print(f"召回率: {basic_metrics['recall']:.4f}")
    print(f"F1分数: {basic_metrics['f1_score']:.4f}")
    print(f"AUC-ROC: {basic_metrics['auc_roc']:.4f}")

    print("\\n🏥 医疗相关指标:")
    print(f"敏感性 (识别恶性): {medical_metrics['sensitivity']:.4f}")
    print(f"特异性 (识别良性): {medical_metrics['specificity']:.4f}")
    print(f"阳性预测值: {medical_metrics['ppv']:.4f}")
    print(f"阴性预测值: {medical_metrics['npv']:.4f}")

    print("\\n🔍 临床重要信息:")
    print(f"漏诊恶性病例: {confusion_analysis['false_negative']}个")
    print(f"误诊良性病例: {confusion_analysis['false_positive']}个")
    print(f"正确诊断: {confusion_analysis['true_positive'] + confusion_analysis['true_negative']}个")

    # Clinical risk: a missed cancer is weighted 10x an unnecessary alarm;
    # normalised to the worst case (all samples missed) and scaled to 0-100.
    fn_risk_weight = 10  # weight of a missed malignancy
    fp_risk_weight = 1   # weight of a false alarm

    clinical_risk_score = (fn * fn_risk_weight + fp * fp_risk_weight) / (len(y_test) * fn_risk_weight) * 100

    print(f"\\n⚠️ 临床风险评分: {clinical_risk_score:.1f}/100")

    if clinical_risk_score < 10:
        risk_level = "低风险 ✅"
        deployment_ready = True
    elif clinical_risk_score < 25:
        risk_level = "中等风险 ⚠️"
        deployment_ready = False
    else:
        risk_level = "高风险 ❌"
        deployment_ready = False

    print(f"风险等级: {risk_level}")
    print(f"部署建议: {'可以考虑部署' if deployment_ready else '需要进一步优化'}")

    evaluation_results = {
        'basic_metrics': basic_metrics,
        'medical_metrics': medical_metrics,
        'confusion_analysis': confusion_analysis,
        'clinical_risk': {
            'score': clinical_risk_score,
            'level': risk_level,
            'deployment_ready': deployment_ready
        },
        'predictions': {
            'y_pred': y_pred.tolist(),
            'y_proba': y_proba.tolist(),
            'y_test': y_test.tolist()
        }
    }

    return evaluation_results


def create_visualizations(automl, evaluation_results, feature_names):
    """
    Render a 2x2 evaluation dashboard, save it as a PNG and show it.

    Panels: confusion-matrix heatmap, ROC curve, bar chart of the medical
    metrics, and histograms of the predicted malignancy probabilities.

    Args:
        automl: fitted model (not used by the current panels; kept for
            interface parity with the rest of the pipeline)
        evaluation_results: dict produced by evaluate_model
        feature_names: feature names (not used by the current panels)
    """
    print("📊 正在生成可视化图表...")
    
    plt.style.use('default')
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))
    
    # Panel 1: confusion matrix heatmap.
    import seaborn as sns
    from sklearn.metrics import confusion_matrix
    
    y_test = np.array(evaluation_results['predictions']['y_test'])
    y_pred = np.array(evaluation_results['predictions']['y_pred'])
    
    # Default row/column order is [0, 1] = [malignant, benign] in this
    # dataset's encoding, which matches the tick labels below.
    cm = confusion_matrix(y_test, y_pred)
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=['Malignant', 'Benign'],
                yticklabels=['Malignant', 'Benign'],
                ax=axes[0, 0])
    axes[0, 0].set_title('混淆矩阵')
    axes[0, 0].set_ylabel('实际类别')
    axes[0, 0].set_xlabel('预测类别')
    
    # Panel 2: ROC curve (class 1 = benign used as the positive score).
    from sklearn.metrics import roc_curve
    y_proba = np.array(evaluation_results['predictions']['y_proba'])
    
    fpr, tpr, _ = roc_curve(y_test, y_proba[:, 1])
    auc_score = evaluation_results['basic_metrics']['auc_roc']
    
    axes[0, 1].plot(fpr, tpr, linewidth=2, label=f'ROC曲线 (AUC = {auc_score:.3f})')
    axes[0, 1].plot([0, 1], [0, 1], 'k--', alpha=0.5)  # chance diagonal
    axes[0, 1].set_xlabel('假阳性率')
    axes[0, 1].set_ylabel('真阳性率')
    axes[0, 1].set_title('ROC曲线')
    axes[0, 1].legend()
    axes[0, 1].grid(True, alpha=0.3)
    
    # Panel 3: bar chart comparing the four clinical metrics.
    medical_metrics = evaluation_results['medical_metrics']
    metrics_names = ['敏感性', '特异性', '阳性预测值', '阴性预测值']
    metrics_values = [
        medical_metrics['sensitivity'],
        medical_metrics['specificity'],
        medical_metrics['ppv'],
        medical_metrics['npv']
    ]
    
    bars = axes[1, 0].bar(metrics_names, metrics_values, 
                         color=['#FF6B6B', '#4ECDC4', '#45B7D1', '#96CEB4'])
    axes[1, 0].set_ylim(0, 1)
    axes[1, 0].set_title('医疗相关指标')
    axes[1, 0].set_ylabel('指标值')
    
    # Annotate each bar with its numeric value.
    for bar, value in zip(bars, metrics_values):
        height = bar.get_height()
        axes[1, 0].text(bar.get_x() + bar.get_width()/2., height + 0.01,
                        f'{value:.3f}', ha='center', va='bottom')
    
    # Panel 4: distribution of predicted malignancy probability (column 0 =
    # P(class 0) = P(malignant)) split by the true class.
    malignant_proba = y_proba[y_test == 0, 0]  # P(malignant) for truly malignant samples
    benign_proba = y_proba[y_test == 1, 0]     # P(malignant) for truly benign samples
    
    axes[1, 1].hist(benign_proba, bins=20, alpha=0.7, label='良性样本', color='green')
    axes[1, 1].hist(malignant_proba, bins=20, alpha=0.7, label='恶性样本', color='red')
    axes[1, 1].axvline(0.5, color='black', linestyle='--', alpha=0.8, label='决策阈值')
    axes[1, 1].set_xlabel('预测为恶性的概率')
    axes[1, 1].set_ylabel('样本数量')
    axes[1, 1].set_title('预测概率分布')
    axes[1, 1].legend()
    
    plt.tight_layout()
    
    # Persist the figure before displaying it interactively.
    viz_path = RESULTS_DIR / "visualizations" / "evaluation_dashboard.png"
    plt.savefig(viz_path, dpi=300, bbox_inches='tight')
    print(f"📁 可视化图表已保存: {viz_path}")
    
    plt.show()


def save_results(training_results, evaluation_results, automl):
    """Persist performance metrics, the fitted model and the leaderboard.

    Args:
        training_results: statistics gathered during training
        evaluation_results: metrics dict produced by evaluate_model
        automl: the fitted Auto-sklearn model
    """
    print("💾 正在保存结果...")

    # Only the summary sections of the evaluation go into the JSON report;
    # the raw per-sample predictions are intentionally left out.
    eval_summary = {
        key: evaluation_results[key]
        for key in ('basic_metrics', 'medical_metrics',
                    'confusion_analysis', 'clinical_risk')
    }
    performance_data = {
        'training_info': training_results,
        'evaluation_results': eval_summary,
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
        'model_config': {
            'framework': 'auto-sklearn',
            'task_type': 'binary_classification',
            'dataset': 'wisconsin_breast_cancer',
            'evaluation_metric': 'medical_weighted_score',
        },
    }

    # Write the JSON report.
    performance_path = RESULTS_DIR / "model_performance.json"
    with open(performance_path, 'w', encoding='utf-8') as handle:
        json.dump(performance_data, handle, indent=2, ensure_ascii=False)

    print(f"📁 性能指标已保存: {performance_path}")

    # Serialising the full model is optional -- the pickle can be large.
    try:
        import joblib
        model_path = RESULTS_DIR / "model_artifacts" / "trained_model.pkl"
        joblib.dump(automl, model_path)
        print(f"📁 训练好的模型已保存: {model_path}")
    except Exception as e:
        print(f"⚠️ 模型保存失败: {e}")

    # Leaderboard export may fail depending on the auto-sklearn version.
    try:
        leaderboard_path = RESULTS_DIR / "model_artifacts" / "leaderboard.csv"
        automl.leaderboard().to_csv(leaderboard_path)
        print(f"📁 模型排行榜已保存: {leaderboard_path}")
    except Exception as e:
        print(f"⚠️ 排行榜保存失败: {e}")


def main():
    """Run the complete ML lifecycle: load, configure, train, evaluate, report.

    Returns:
        dict: {'success': True, 'model': ..., 'results': ...} on success,
        {'success': False, 'error': ...} when any stage raises.
    """
    print("🏥 乳腺癌诊断二分类 - Auto-sklearn实现")
    print("=" * 50)

    try:
        # Stage 1: data loading and splitting.
        data = load_and_prepare_data()

        # Stage 2: classifier configuration.
        classifier = create_automl_classifier()

        # Stage 3: model search / training.
        train_stats = train_model(classifier, data['X_train'], data['y_train'])

        # Stage 4: held-out evaluation.
        eval_report = evaluate_model(
            classifier, data['X_test'], data['y_test'], data['target_names']
        )

        # Stage 5: dashboard figures.
        create_visualizations(classifier, eval_report, data['feature_names'])

        # Stage 6: persist metrics, model and leaderboard.
        save_results(train_stats, eval_report, classifier)

        print("\\n" + "=" * 50)
        print("✅ 完整流程执行成功!")
        print("📁 请查看 results/ 目录获取详细结果")
        print("📊 可视化图表: results/visualizations/")
        print("📈 性能指标: results/model_performance.json")
        print("🤖 训练模型: results/model_artifacts/")

        return {'success': True, 'model': classifier, 'results': eval_report}

    except Exception as e:
        # Report the failure but keep the script's return contract intact.
        print(f"❌ 执行过程中出现错误: {e}")
        import traceback
        traceback.print_exc()
        return {'success': False, 'error': str(e)}


if __name__ == "__main__":
    # 执行主流程
    result = main()
    
    if result['success']:
        print("\\n🎉 恭喜! 您已成功完成乳腺癌诊断模型的训练和评估!")
        print("\\n📚 学习建议:")
        print("1. 分析 results/model_performance.json 中的详细指标")
        print("2. 观察可视化图表，理解模型性能")
        print("3. 尝试调整 time_left_for_this_task 参数，观察性能变化")
        print("4. 修改医疗评分函数的权重，体验不同的优化目标")
        print("5. 继续学习下一个示例: ../02_regression/")
    else:
        print("\\n💡 遇到问题了? 请检查:")
        print("1. 是否正确安装了 auto-sklearn")
        print("2. 是否有足够的内存和存储空间")
        print("3. 查看错误信息，寻求帮助")