#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
乳腺癌数据集深度分析脚本
探索性数据分析(EDA)和数据质量评估

作者: AutoML学习指南
日期: 2024年7月
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')

import sklearn.datasets
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from scipy import stats

# Configure global plotting style
plt.style.use('seaborn-v0_8-whitegrid')
sns.set_palette("husl")

# Output directory for generated figures (created up front so later saves cannot fail)
RESULTS_DIR = Path("../results/visualizations")
RESULTS_DIR.mkdir(parents=True, exist_ok=True)


def load_dataset():
    """
    Load the Wisconsin breast cancer dataset into a pandas DataFrame.

    Adds a numeric 'target' column (0 = malignant, 1 = benign) and a
    human-readable 'target_name' column alongside the raw features.

    Returns:
        tuple: (DataFrame, dict with dataset metadata such as description,
                feature names, target names, shape and missing-value count)
    """
    print("🔄 加载威斯康星乳腺癌数据集...")

    # Fetch the bundled sklearn dataset
    raw = sklearn.datasets.load_breast_cancer()

    # Assemble the analysis frame: features + numeric label + readable label
    frame = pd.DataFrame(raw.data, columns=raw.feature_names)
    frame['target'] = raw.target
    frame['target_name'] = frame['target'].map({0: 'malignant', 1: 'benign'})

    info = {
        'description': raw.DESCR,
        'feature_names': raw.feature_names,
        'target_names': raw.target_names,
        'shape': frame.shape,
        'missing_values': frame.isnull().sum().sum(),
    }

    print(f"✅ 数据集加载完成:")
    print(f"   - 样本数: {frame.shape[0]}")
    print(f"   - 特征数: {frame.shape[1]-2}")  # excludes 'target' and 'target_name'
    print(f"   - 缺失值: {info['missing_values']}")

    return frame, info


def basic_statistics_analysis(df):
    """
    Run basic descriptive statistics on the dataset.

    Args:
        df: DataFrame with numeric feature columns plus a numeric 'target'
            column (0/1) and a string 'target_name' column.

    Returns:
        dict: {'target_analysis': class counts/proportions,
               'feature_statistics': per-feature descriptive stats,
               'feature_groups': features grouped into mean/se/worst families}
    """
    print("\\n📊 执行基础统计分析...")

    # Target variable analysis
    target_analysis = {
        'class_counts': df['target'].value_counts().to_dict(),
        'class_proportions': df['target'].value_counts(normalize=True).to_dict(),
        'class_names': df['target_name'].value_counts().to_dict()
    }

    print(f"类别分布:")
    for target, name in zip([0, 1], ['恶性', '良性']):
        count = target_analysis['class_counts'][target]
        prop = target_analysis['class_proportions'][target]
        print(f"   - {name}: {count}个样本 ({prop:.1%})")

    # Per-feature descriptive statistics
    numeric_features = df.select_dtypes(include=[np.number]).columns.drop(['target'])

    feature_stats = {}
    for feature in numeric_features:
        mean = df[feature].mean()
        feature_stats[feature] = {
            'mean': mean,
            'median': df[feature].median(),
            'std': df[feature].std(),
            'min': df[feature].min(),
            'max': df[feature].max(),
            'skewness': df[feature].skew(),
            'kurtosis': df[feature].kurtosis(),
            # Coefficient of variation, guarded against a zero mean
            'cv': df[feature].std() / mean if mean != 0 else 0
        }

    # Feature-group analysis (mean / se / worst).
    # sklearn's breast-cancer columns are space-separated prefix style
    # ("mean radius", "radius error", "worst radius"), so the old
    # "'_mean' in f" substring tests matched nothing and every group came
    # back empty.  Match the sklearn naming, keeping the underscore-suffix
    # patterns for frames that use "radius_mean"-style names.
    feature_groups = {
        'mean_features': [f for f in numeric_features
                          if f.startswith('mean ') or '_mean' in f],
        'se_features': [f for f in numeric_features
                        if f.endswith(' error') or '_se' in f],
        'worst_features': [f for f in numeric_features
                           if f.startswith('worst ') or '_worst' in f]
    }

    print(f"\\n特征组分布:")
    for group_name, features in feature_groups.items():
        print(f"   - {group_name}: {len(features)}个特征")

    return {
        'target_analysis': target_analysis,
        'feature_statistics': feature_stats,
        'feature_groups': feature_groups
    }


def correlation_analysis(df):
    """
    Analyse pairwise feature correlations and feature-target correlations.

    Args:
        df: DataFrame with numeric feature columns plus a numeric 'target'.

    Returns:
        dict: full correlation matrix, highly correlated feature pairs
              (|r| > 0.8), and feature-target correlations sorted by
              absolute strength.
    """
    print("\\n🔗 执行相关性分析...")

    numeric_features = df.select_dtypes(include=[np.number]).columns.drop(['target'])
    correlation_matrix = df[numeric_features].corr()

    # Collect every upper-triangle pair whose |correlation| exceeds the threshold
    threshold = 0.8
    cols = correlation_matrix.columns
    high_correlations = [
        {
            'feature1': cols[row],
            'feature2': cols[col],
            'correlation': correlation_matrix.iloc[row, col],
        }
        for row in range(len(cols))
        for col in range(row + 1, len(cols))
        if abs(correlation_matrix.iloc[row, col]) > threshold
    ]

    print(f"发现 {len(high_correlations)} 对高相关性特征 (|r| > {threshold}):")
    for pair in high_correlations[:5]:  # show at most the first five pairs
        print(f"   - {pair['feature1']} vs {pair['feature2']}: {pair['correlation']:.3f}")

    # Correlation of every feature with the target variable
    target_correlations = {
        feature: df[feature].corr(df['target']) for feature in numeric_features
    }

    # Rank by absolute correlation, strongest first
    sorted_target_corr = sorted(target_correlations.items(),
                                key=lambda item: abs(item[1]), reverse=True)

    print(f"\\n与目标变量相关性最高的特征:")
    for feature, corr in sorted_target_corr[:5]:
        print(f"   - {feature}: {corr:.3f}")

    return {
        'correlation_matrix': correlation_matrix,
        'high_correlations': high_correlations,
        'target_correlations': dict(sorted_target_corr)
    }


def distribution_analysis(df):
    """
    Analyse feature distributions: normality tests and IQR outlier detection.

    Args:
        df: DataFrame with numeric feature columns plus a numeric 'target'.

    Returns:
        dict: {'normality_tests': per-feature test stat/p-value/verdict,
               'outlier_analysis': per-feature IQR outlier counts and bounds,
               'normal_features': features that passed the normality test}
    """
    print("\\n📈 执行分布分析...")

    numeric_features = df.select_dtypes(include=[np.number]).columns.drop(['target'])

    # Normality tests
    normality_tests = {}
    for feature in numeric_features:
        if len(df) < 5000:
            # Shapiro-Wilk test (small samples), on at most 1000 rows.
            # Fixed random_state makes the subsample -- and therefore the
            # test result -- reproducible across runs.
            sample = df[feature].sample(min(1000, len(df)), random_state=42)
            stat, p_value = stats.shapiro(sample)
        else:
            # D'Agostino-Pearson test for large samples (scipy normaltest)
            stat, p_value = stats.normaltest(df[feature])

        normality_tests[feature] = {
            'statistic': stat,
            'p_value': p_value,
            'is_normal': p_value > 0.05  # fail to reject H0 at the 5% level
        }

    normal_features = [f for f, test in normality_tests.items() if test['is_normal']]
    print(f"正态分布特征数量: {len(normal_features)}/{len(numeric_features)}")

    # Outlier detection via the 1.5 * IQR rule
    outlier_analysis = {}
    for feature in numeric_features:
        Q1 = df[feature].quantile(0.25)
        Q3 = df[feature].quantile(0.75)
        IQR = Q3 - Q1

        lower_bound = Q1 - 1.5 * IQR
        upper_bound = Q3 + 1.5 * IQR

        outliers = df[(df[feature] < lower_bound) | (df[feature] > upper_bound)]

        outlier_analysis[feature] = {
            'count': len(outliers),
            'percentage': len(outliers) / len(df) * 100,
            'lower_bound': lower_bound,
            'upper_bound': upper_bound
        }

    # Summarise which features contain at least one outlier
    features_with_outliers = {f: info for f, info in outlier_analysis.items()
                              if info['count'] > 0}

    print(f"存在异常值的特征: {len(features_with_outliers)}/{len(numeric_features)}")

    return {
        'normality_tests': normality_tests,
        'outlier_analysis': outlier_analysis,
        'normal_features': normal_features
    }


def class_comparison_analysis(df):
    """
    Compare feature distributions between malignant and benign samples.

    Args:
        df: DataFrame with numeric feature columns plus 'target' and
            'target_name' (values 'malignant' / 'benign').

    Returns:
        dict: per-class summary statistics and per-feature difference
              metrics (Cohen's d, t-test) ordered by effect size.
    """
    print("\\n⚖️ 执行类别对比分析...")

    numeric_features = df.select_dtypes(include=[np.number]).columns.drop(['target'])
    class_labels = ['malignant', 'benign']

    # Per-class summary statistics for every feature
    class_stats = {}
    for label in class_labels:
        subset = df[df['target_name'] == label]
        class_stats[label] = {
            feature: {
                'mean': subset[feature].mean(),
                'median': subset[feature].median(),
                'std': subset[feature].std(),
            }
            for feature in numeric_features
        }

    # Quantify the between-class difference per feature
    feature_differences = {}
    for feature in numeric_features:
        m_mean = class_stats['malignant'][feature]['mean']
        b_mean = class_stats['benign'][feature]['mean']
        m_std = class_stats['malignant'][feature]['std']
        b_std = class_stats['benign'][feature]['std']

        # Effect size (Cohen's d) with an unweighted pooled std
        pooled_std = np.sqrt((m_std ** 2 + b_std ** 2) / 2)
        cohens_d = abs(m_mean - b_mean) / pooled_std if pooled_std > 0 else 0

        # Independent two-sample t-test between the classes
        t_stat, p_value = stats.ttest_ind(
            df[df['target_name'] == 'malignant'][feature],
            df[df['target_name'] == 'benign'][feature],
        )

        feature_differences[feature] = {
            'malignant_mean': m_mean,
            'benign_mean': b_mean,
            'difference': m_mean - b_mean,
            'cohens_d': cohens_d,
            't_statistic': t_stat,
            'p_value': p_value,
            'significant': p_value < 0.05,
        }

    # Rank features by effect size, strongest discriminators first
    sorted_differences = sorted(feature_differences.items(),
                                key=lambda item: item[1]['cohens_d'], reverse=True)

    print(f"类别区分能力最强的特征 (按Cohen's d排序):")
    for feature, diff_info in sorted_differences[:5]:
        d = diff_info['cohens_d']
        p = diff_info['p_value']
        print(f"   - {feature}: Cohen's d = {d:.3f}, p = {p:.2e}")

    return {
        'class_statistics': class_stats,
        'feature_differences': dict(sorted_differences)
    }


def create_comprehensive_visualizations(df, stats_results, corr_results, 
                                      dist_results, class_results):
    """
    Build the 12-panel comprehensive EDA figure and save it as a PNG.

    Args:
        df: DataFrame with feature columns plus 'target' and 'target_name'.
        stats_results: result dict from basic_statistics_analysis().
        corr_results: result dict from correlation_analysis().
        dist_results: result dict from distribution_analysis().
        class_results: result dict from class_comparison_analysis().
    """
    print("\\n🎨 创建数据分析可视化...")
    
    # One large figure holding a 4x3 grid of panels
    fig = plt.figure(figsize=(20, 24))
    
    # 1. Class distribution pie chart
    ax1 = plt.subplot(4, 3, 1)
    target_counts = df['target_name'].value_counts()
    colors = ['#FF6B6B', '#4ECDC4']
    wedges, texts, autotexts = ax1.pie(target_counts.values, labels=target_counts.index, 
                                      autopct='%1.1f%%', colors=colors, startangle=90)
    ax1.set_title('类别分布', fontsize=14, fontweight='bold')
    
    # 2. Correlation heatmap for a feature subset
    ax2 = plt.subplot(4, 3, 2)
    # Top 10 most discriminative features (feature_differences is pre-sorted
    # by Cohen's d in class_comparison_analysis)
    top_features = list(class_results['feature_differences'].keys())[:10]
    corr_subset = corr_results['correlation_matrix'].loc[top_features, top_features]
    
    sns.heatmap(corr_subset, annot=False, cmap='coolwarm', center=0, 
                square=True, ax=ax2, cbar_kws={'shrink': 0.8})
    ax2.set_title('特征相关性热力图 (Top 10)', fontsize=14, fontweight='bold')
    ax2.tick_params(axis='both', labelsize=8)
    
    # 3. Feature importance by Cohen's d effect size
    ax3 = plt.subplot(4, 3, 3)
    top_discriminative = list(class_results['feature_differences'].keys())[:10]
    cohens_d_values = [class_results['feature_differences'][f]['cohens_d'] 
                      for f in top_discriminative]
    
    bars = ax3.barh(range(len(top_discriminative)), cohens_d_values, color='skyblue')
    ax3.set_yticks(range(len(top_discriminative)))
    ax3.set_yticklabels([f.replace('_', ' ').title() for f in top_discriminative], fontsize=8)
    ax3.set_xlabel("Cohen's d")
    ax3.set_title('特征区分能力 (效应大小)', fontsize=14, fontweight='bold')
    ax3.invert_yaxis()
    
    # 4-6. Class-conditional distributions for the top three features
    for i, feature in enumerate(top_discriminative[:3]):
        ax = plt.subplot(4, 3, 4 + i)
        
        malignant_data = df[df['target_name'] == 'malignant'][feature]
        benign_data = df[df['target_name'] == 'benign'][feature]
        
        ax.hist(malignant_data, bins=20, alpha=0.7, label='恶性', color='red', density=True)
        ax.hist(benign_data, bins=20, alpha=0.7, label='良性', color='green', density=True)
        
        ax.set_xlabel(feature.replace('_', ' ').title())
        ax.set_ylabel('密度')
        ax.set_title(f'{feature.replace("_", " ").title()}\\n分布对比', fontsize=12)
        ax.legend()
        ax.grid(True, alpha=0.3)
    
    # 7. Box plots highlighting outliers
    ax7 = plt.subplot(4, 3, 7)
    features_for_boxplot = top_discriminative[:5]
    df_subset = df[features_for_boxplot + ['target_name']]
    
    # Standardize so features on different scales share one axis
    scaler = StandardScaler()
    df_scaled = df_subset.copy()
    df_scaled[features_for_boxplot] = scaler.fit_transform(df_scaled[features_for_boxplot])
    
    # Reshape into one series per (feature, class) combination
    boxplot_data = []
    boxplot_labels = []
    for feature in features_for_boxplot:
        for class_name in ['malignant', 'benign']:
            data = df_scaled[df_scaled['target_name'] == class_name][feature]
            boxplot_data.append(data)
            boxplot_labels.append(f'{feature[:10]}\\n{class_name}')
    
    box_plot = ax7.boxplot(boxplot_data, labels=boxplot_labels, patch_artist=True)
    
    # Color boxes red/green per class
    colors = ['red', 'green'] * len(features_for_boxplot)
    for patch, color in zip(box_plot['boxes'], colors):
        patch.set_facecolor(color)
        patch.set_alpha(0.6)
    
    ax7.set_title('特征分布箱线图 (标准化)', fontsize=12)
    ax7.tick_params(axis='x', rotation=45, labelsize=8)
    ax7.grid(True, alpha=0.3)
    
    # 8. PCA projection
    ax8 = plt.subplot(4, 3, 8)
    numeric_features = df.select_dtypes(include=[np.number]).columns.drop(['target'])
    
    # Standardize, then project onto the first two principal components
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(df[numeric_features])
    
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X_scaled)
    
    # Scatter colored by class
    colors = ['red' if t == 'malignant' else 'green' for t in df['target_name']]
    scatter = ax8.scatter(X_pca[:, 0], X_pca[:, 1], c=colors, alpha=0.6)
    
    ax8.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.1%} variance)')
    ax8.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.1%} variance)')
    ax8.set_title('PCA可视化', fontsize=12)
    ax8.grid(True, alpha=0.3)
    
    # Manual legend (the scatter uses raw colors, not labeled series)
    from matplotlib.lines import Line2D
    legend_elements = [Line2D([0], [0], marker='o', color='w', markerfacecolor='red', 
                             markersize=8, label='恶性'),
                      Line2D([0], [0], marker='o', color='w', markerfacecolor='green', 
                             markersize=8, label='良性')]
    ax8.legend(handles=legend_elements)
    
    # 9. Feature-group comparison (mean vs worst)
    ax9 = plt.subplot(4, 3, 9)
    
    # Base measurements whose "mean" and "worst" variants we compare
    base_features = ['radius', 'texture', 'perimeter', 'area', 'compactness']
    
    # sklearn's breast-cancer columns are named "mean radius" / "worst radius"
    # (space-separated prefixes), not "radius_mean" / "radius_worst" -- the
    # old suffix-style names matched no columns, left the diff lists empty
    # and made ax9.bar fail on mismatched x / height lengths.
    mean_features = ['mean ' + f for f in base_features]
    worst_features = ['worst ' + f for f in base_features]
    
    # Malignant-minus-benign mean differences for each variant
    mean_diffs = []
    worst_diffs = []
    plotted_bases = []  # base features actually present in df
    
    for i, base_feature in enumerate(base_features):
        if mean_features[i] in df.columns and worst_features[i] in df.columns:
            # Difference on the "mean" variant
            mean_diff = (df[df['target_name'] == 'malignant'][mean_features[i]].mean() - 
                        df[df['target_name'] == 'benign'][mean_features[i]].mean())
            mean_diffs.append(mean_diff)
            
            # Difference on the "worst" variant
            worst_diff = (df[df['target_name'] == 'malignant'][worst_features[i]].mean() - 
                         df[df['target_name'] == 'benign'][worst_features[i]].mean())
            worst_diffs.append(worst_diff)
            plotted_bases.append(base_feature)
    
    # Size the axis by what was actually matched so the bar heights and
    # x positions always agree
    x = np.arange(len(plotted_bases))
    width = 0.35
    
    ax9.bar(x - width/2, mean_diffs, width, label='Mean特征', alpha=0.8)
    ax9.bar(x + width/2, worst_diffs, width, label='Worst特征', alpha=0.8)
    
    ax9.set_xlabel('基础特征')
    ax9.set_ylabel('恶性 - 良性 (平均差异)')
    ax9.set_title('Mean vs Worst特征比较', fontsize=12)
    ax9.set_xticks(x)
    ax9.set_xticklabels(plotted_bases, rotation=45)
    ax9.legend()
    ax9.grid(True, alpha=0.3)
    
    # 10. Data quality overview
    ax10 = plt.subplot(4, 3, 10)
    
    quality_metrics = {
        '完整性': (1 - df.isnull().sum().sum() / df.size) * 100,
        '一致性': (1 - df.duplicated().sum() / len(df)) * 100,
        '正态性': (len(dist_results['normal_features']) / len(numeric_features)) * 100,
        '平衡性': min(df['target'].value_counts()) / max(df['target'].value_counts()) * 100
    }
    
    metrics_names = list(quality_metrics.keys())
    metrics_values = list(quality_metrics.values())
    
    bars = ax10.bar(metrics_names, metrics_values, color=['#3498db', '#e74c3c', '#2ecc71', '#f39c12'])
    ax10.set_ylim(0, 100)
    ax10.set_ylabel('质量分数 (%)')
    ax10.set_title('数据质量概览', fontsize=12)
    
    # Annotate each bar with its value
    for bar, value in zip(bars, metrics_values):
        height = bar.get_height()
        ax10.text(bar.get_x() + bar.get_width()/2., height + 1,
                 f'{value:.1f}%', ha='center', va='bottom')
    
    # 11. Feature-scaling necessity (coefficient of variation)
    ax11 = plt.subplot(4, 3, 11)
    
    # CV per feature; first 15 only, to keep the axis readable
    cv_values = []
    feature_names_short = []
    
    for feature in numeric_features[:15]:
        cv = df[feature].std() / df[feature].mean() if df[feature].mean() != 0 else 0
        cv_values.append(cv)
        feature_names_short.append(feature.split('_')[0])  # shorten the label
    
    bars = ax11.bar(range(len(cv_values)), cv_values, color='orange', alpha=0.7)
    ax11.set_xlabel('特征')
    ax11.set_ylabel('变异系数 (CV)')
    ax11.set_title('特征缩放需要性\\n(变异系数分析)', fontsize=12)
    ax11.set_xticks(range(len(feature_names_short)))
    ax11.set_xticklabels(feature_names_short, rotation=45, fontsize=8)
    
    # Reference line marking "high variation"
    ax11.axhline(y=1.0, color='red', linestyle='--', alpha=0.7, label='高变异阈值')
    ax11.legend()
    ax11.grid(True, alpha=0.3)
    
    # 12. Medical interpretation (text-only panel)
    ax12 = plt.subplot(4, 3, 12)
    ax12.axis('off')
    
    # Narrative summary rendered as plain text lines
    medical_insights = [
        "🔬 医学洞察:",
        "",
        f"• 数据集包含 {df.shape[0]} 个样本",
        f"• 恶性比例: {df['target'].value_counts()[0]/len(df):.1%}",
        f"• 良性比例: {df['target'].value_counts()[1]/len(df):.1%}",
        "",
        "🎯 关键区分特征:",
        f"• {list(class_results['feature_differences'].keys())[0]}",
        f"• {list(class_results['feature_differences'].keys())[1]}",
        f"• {list(class_results['feature_differences'].keys())[2]}",
        "",
        "📊 数据质量:",
        f"• 无缺失值 ✅",
        f"• 特征相关性: {len(corr_results['high_correlations'])} 对高相关",
        f"• 异常值检测完成 ✅",
        "",
        "💡 建议:",
        "• 需要特征标准化",
        "• 考虑降维技术",
        "• 注意类别不平衡"
    ]
    
    y_pos = 0.95
    for line in medical_insights:
        ax12.text(0.05, y_pos, line, transform=ax12.transAxes, 
                 fontsize=10, verticalalignment='top',
                 fontweight='bold' if line.startswith(('🔬', '🎯', '📊', '💡')) else 'normal')
        y_pos -= 0.04
    
    plt.tight_layout()
    
    # Persist the figure
    analysis_path = RESULTS_DIR / "comprehensive_data_analysis.png"
    plt.savefig(analysis_path, dpi=300, bbox_inches='tight')
    print(f"📁 综合数据分析图已保存: {analysis_path}")
    
    plt.show()


def generate_data_report(df, stats_results, corr_results, dist_results, class_results):
    """
    Assemble the final data-analysis report and persist it as JSON.

    Args:
        df: source DataFrame with features, 'target' and 'target_name'.
        stats_results: result dict from basic_statistics_analysis().
        corr_results: result dict from correlation_analysis().
        dist_results: result dict from distribution_analysis().
        class_results: result dict from class_comparison_analysis().

    Returns:
        dict: the complete, JSON-serializable analysis report.
    """
    print("\\n📝 生成数据分析报告...")

    numeric_feature_count = len(df.select_dtypes(include=[np.number]).columns.drop(['target']))

    # pandas/numpy scalars are not JSON serializable: value_counts().to_dict()
    # yields np.int64 keys/values and .sum().sum() yields np.int64, which made
    # json.dump raise TypeError.  Cast everything numeric to native Python
    # types before building the report.
    class_distribution = {
        int(label): int(count)
        for label, count in stats_results['target_analysis']['class_counts'].items()
    }

    comprehensive_report = {
        'dataset_overview': {
            'name': '威斯康星乳腺癌数据集',
            'samples': int(df.shape[0]),
            'features': int(df.shape[1] - 2),  # excludes target and target_name
            'classes': int(df['target'].nunique()),
            'missing_values': int(df.isnull().sum().sum()),
            'class_distribution': class_distribution
        },
        
        'data_quality_assessment': {
            'completeness_score': float((1 - df.isnull().sum().sum() / df.size) * 100),
            'consistency_score': float((1 - df.duplicated().sum() / len(df)) * 100),
            'balance_score': float(min(df['target'].value_counts()) / max(df['target'].value_counts()) * 100),
            'normal_features_ratio': float(len(dist_results['normal_features']) / numeric_feature_count * 100)
        },
        
        'key_findings': {
            'most_discriminative_features': list(class_results['feature_differences'].keys())[:5],
            'highly_correlated_pairs': len(corr_results['high_correlations']),
            'features_with_outliers': len([f for f, info in dist_results['outlier_analysis'].items() if info['count'] > 0]),
            'significant_differences': len([f for f, info in class_results['feature_differences'].items() if info['significant']])
        },
        
        'recommendations': {
            'preprocessing': [
                '建议进行特征标准化 (数据未标准化)',
                '考虑处理高相关性特征对',
                '检查和处理异常值',
                '考虑特征选择技术'
            ],
            'modeling': [
                '数据质量良好，适合AutoML',
                '类别轻微不平衡，考虑评估指标选择',
                '丰富的特征集，适合集成方法',
                '建议使用交叉验证'
            ],
            'validation': [
                '重点关注敏感性指标 (医疗场景)',
                '进行特征重要性分析',
                '考虑模型可解释性',
                '验证模型在不同子群的性能'
            ]
        },
        
        'medical_context': {
            'clinical_significance': '早期乳腺癌诊断对患者预后至关重要',
            'cost_of_errors': {
                'false_negative': '漏诊恶性肿瘤 - 延误治疗，威胁生命',
                'false_positive': '误诊良性肿瘤 - 不必要焦虑和检查'
            },
            'performance_priorities': {
                'primary': '敏感性 (sensitivity) - 正确识别恶性肿瘤',
                'secondary': '特异性 (specificity) - 正确识别良性肿瘤',
                'balance': '在敏感性和特异性之间找到最佳平衡'
            }
        }
    }
    
    # Persist the report as UTF-8 JSON
    import json
    report_path = Path("../results/data_analysis_report.json")
    report_path.parent.mkdir(exist_ok=True)
    
    with open(report_path, 'w', encoding='utf-8') as f:
        json.dump(comprehensive_report, f, indent=2, ensure_ascii=False)
    
    print(f"📁 数据分析报告已保存: {report_path}")
    
    return comprehensive_report


def main():
    """
    Entry point: run the full exploratory-data-analysis pipeline.

    Returns:
        dict | None: the final report on success, or None if any step failed.
    """
    print("🔬 威斯康星乳腺癌数据集 - 深度数据分析")
    print("=" * 60)

    try:
        # Step 1: load the data
        data, _dataset_info = load_dataset()

        # Steps 2-5: run every analysis stage on the loaded frame
        basic_stats = basic_statistics_analysis(data)
        correlations = correlation_analysis(data)
        distributions = distribution_analysis(data)
        class_comparison = class_comparison_analysis(data)

        # Step 6: render the combined visualization
        create_comprehensive_visualizations(data, basic_stats, correlations,
                                            distributions, class_comparison)

        # Step 7: assemble and persist the final report
        final_report = generate_data_report(data, basic_stats, correlations,
                                            distributions, class_comparison)

        print("\\n" + "=" * 60)
        print("✅ 数据分析完成!")
        print("\\n📊 主要发现:")
        print(f"   - 最具区分能力的特征: {final_report['key_findings']['most_discriminative_features'][0]}")
        print(f"   - 高相关性特征对: {final_report['key_findings']['highly_correlated_pairs']}对")
        print(f"   - 数据质量评分: {final_report['data_quality_assessment']['completeness_score']:.1f}/100")

        print("\\n📁 输出文件:")
        print("   - 可视化图表: results/visualizations/comprehensive_data_analysis.png")
        print("   - 分析报告: results/data_analysis_report.json")

        print("\\n🎯 下一步建议:")
        print("   1. 查看可视化图表，深入理解数据特征")
        print("   2. 阅读分析报告，了解数据质量和关键发现")
        print("   3. 运行 main.py 开始模型训练")
        print("   4. 根据数据洞察调整模型配置")

        return final_report

    except Exception as e:
        # Surface the failure with a traceback but keep the process alive
        print(f"❌ 数据分析过程中出现错误: {e}")
        import traceback
        traceback.print_exc()
        return None


if __name__ == "__main__":
    # Run the full analysis pipeline when executed as a script
    report = main()

    if report is None:
        print("\\n❌ 数据分析失败，请检查错误信息并重试")
    else:
        print("\\n🎉 数据分析成功完成!")
        print("\\n💡 学习要点:")
        for takeaway in (
            "1. 理解医疗数据的特殊性和重要性",
            "2. 掌握完整的探索性数据分析流程",
            "3. 学会识别数据中的关键模式和异常",
            "4. 为后续建模提供数据驱动的洞察",
        ):
            print(takeaway)