#!/usr/bin/env python3
"""
糖尿病数据集探索性数据分析(EDA)
===============================

本脚本提供糖尿病数据集的全面探索性数据分析，
包括数据质量检查、分布分析、相关性分析等。
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
from scipy import stats
import warnings

# Silence library warnings so the console report stays readable.
warnings.filterwarnings('ignore')
# Global plotting defaults: seaborn look, a CJK-capable font (SimHei) so the
# Chinese labels render, and ASCII minus signs (SimHei lacks U+2212).
plt.style.use('seaborn-v0_8')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class DiabetesDataAnalyzer:
    """Exploratory data analysis (EDA) helper for the sklearn diabetes dataset.

    Usage: call ``load_data()`` first to populate ``self.df``; every other
    method reads ``self.df``, prints its findings to stdout, and
    ``create_visualizations()`` additionally saves figures under ../results/.
    """

    def __init__(self):
        """Set up feature metadata; no data is loaded yet (``self.df`` is None)."""
        # Column order matches sklearn's load_diabetes() feature layout.
        self.feature_names = [
            'age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6'
        ]
        # Human-readable (Chinese) labels used in printed reports and plot axes.
        self.feature_descriptions = {
            'age': '年龄 (标准化)',
            'sex': '性别 (标准化)',
            'bmi': '体重指数 (标准化)',
            'bp': '平均血压 (标准化)',
            's1': '血清胆固醇总量 (标准化)',
            's2': '低密度脂蛋白 (标准化)',
            's3': '高密度脂蛋白 (标准化)',
            's4': '血清甘油三酯 (标准化)',
            's5': '血糖水平 (标准化)',
            's6': '血糖耐受指标 (标准化)'
        }
        self.df = None  # populated by load_data()

    def load_data(self):
        """Load the diabetes dataset into ``self.df`` and return it.

        Adds a 'target' column (disease progression score) and a
        'severity_category' column by cutting the target into three
        equal-width bins (轻度/中度/重度).

        Returns:
            pd.DataFrame: the assembled dataset.
        """
        print("正在加载糖尿病数据集...")

        diabetes = datasets.load_diabetes()
        X, y = diabetes.data, diabetes.target

        self.df = pd.DataFrame(X, columns=self.feature_names)
        self.df['target'] = y
        # Equal-width tertiles of the target, used for categorical summaries.
        self.df['severity_category'] = pd.cut(y, bins=3, labels=['轻度', '中度', '重度'])

        print(f"数据集加载完成: {self.df.shape}")
        return self.df

    def basic_info_analysis(self):
        """Print sample/feature counts and target-variable summary statistics."""
        print("\n📊 数据集基础信息")
        print("=" * 50)

        print(f"样本数量: {len(self.df)}")
        print(f"特征数量: {len(self.feature_names)}")
        print("目标变量: 疾病严重程度 (连续值)")

        print("\n目标变量统计:")
        print(f"- 最小值: {self.df['target'].min():.1f}")
        print(f"- 最大值: {self.df['target'].max():.1f}")
        print(f"- 均值: {self.df['target'].mean():.1f}")
        print(f"- 标准差: {self.df['target'].std():.1f}")
        print(f"- 中位数: {self.df['target'].median():.1f}")

        # Distribution of the severity categories created in load_data().
        print("\n严重程度分布:")
        severity_counts = self.df['severity_category'].value_counts()
        for category, count in severity_counts.items():
            print(f"- {category}: {count} 例 ({count/len(self.df)*100:.1f}%)")

    def data_quality_check(self):
        """Check missing values, duplicate rows, IQR outliers and dtypes."""
        print("\n🔍 数据质量检查")
        print("=" * 50)

        # Missing-value audit
        missing_values = self.df.isnull().sum()
        print("缺失值统计:")
        if missing_values.sum() == 0:
            print("✅ 无缺失值")
        else:
            print(missing_values)

        # Duplicate-row audit
        duplicates = self.df.duplicated().sum()
        print(f"\n重复值: {duplicates} 条")
        if duplicates == 0:
            print("✅ 无重复值")

        # Outliers via the 1.5*IQR fence, per numeric column.
        print("\n异常值检查 (IQR方法):")
        for col in self.feature_names + ['target']:
            Q1 = self.df[col].quantile(0.25)
            Q3 = self.df[col].quantile(0.75)
            IQR = Q3 - Q1
            lower_bound = Q1 - 1.5 * IQR
            upper_bound = Q3 + 1.5 * IQR

            outliers = ((self.df[col] < lower_bound) | (self.df[col] > upper_bound)).sum()
            if outliers > 0:
                print(f"- {col}: {outliers} 个异常值 ({outliers/len(self.df)*100:.1f}%)")

        # Column dtype overview
        print("\n数据类型:")
        print(self.df.dtypes)

    def descriptive_statistics(self):
        """Print the describe() table plus per-column skewness and kurtosis."""
        print("\n📈 描述性统计")
        print("=" * 50)

        desc_stats = self.df.describe()
        print("基础统计量:")
        print(desc_stats.round(3))

        # Skewness/kurtosis flag departures from normality per column.
        print("\n偏度和峰度分析:")
        for col in self.feature_names + ['target']:
            skewness = stats.skew(self.df[col])
            kurtosis = stats.kurtosis(self.df[col])
            print(f"{self.feature_descriptions.get(col, col):15} - 偏度: {skewness:6.3f}, 峰度: {kurtosis:6.3f}")

    def correlation_analysis(self):
        """Print feature-target correlations and highly correlated feature pairs.

        Returns:
            pd.DataFrame: the full correlation matrix (features + target).
        """
        print("\n🔗 相关性分析")
        print("=" * 50)

        corr_matrix = self.df[self.feature_names + ['target']].corr()

        # Correlation with the target, strongest magnitude first.
        target_corr = corr_matrix['target'].sort_values(key=abs, ascending=False)
        print("与目标变量的相关性 (按绝对值排序):")
        for feature, corr in target_corr.items():
            if feature != 'target':
                desc = self.feature_descriptions.get(feature, feature)
                print(f"{desc:20} : {corr:6.3f}")

        # Feature pairs with |r| > 0.7 hint at multicollinearity.
        print("\n特征间高相关性检查 (|r| > 0.7):")
        high_corr_pairs = []
        for i in range(len(self.feature_names)):
            for j in range(i+1, len(self.feature_names)):
                corr = corr_matrix.iloc[i, j]
                if abs(corr) > 0.7:
                    feature1 = self.feature_names[i]
                    feature2 = self.feature_names[j]
                    high_corr_pairs.append((feature1, feature2, corr))
                    print(f"{feature1} - {feature2}: {corr:.3f}")

        if not high_corr_pairs:
            print("✅ 无高度相关的特征对")

        return corr_matrix

    def distribution_analysis(self):
        """Run a Shapiro-Wilk normality test on every feature and the target."""
        print("\n📊 特征分布分析")
        print("=" * 50)

        print("正态性检验 (Shapiro-Wilk):")
        for col in self.feature_names + ['target']:
            statistic, p_value = stats.shapiro(self.df[col])
            # p > 0.05: cannot reject normality at the 5% level.
            is_normal = "正态" if p_value > 0.05 else "非正态"
            desc = self.feature_descriptions.get(col, col)
            print(f"{desc:20} : {is_normal} (p={p_value:.4f})")

    def create_visualizations(self):
        """Create and save the EDA figures under ../results/.

        Saves three PNGs (eda_overview.png, correlation_heatmap.png,
        feature_importance.png). Each figure is closed after saving so
        repeated calls do not accumulate open matplotlib figures.
        """
        print("\n正在生成可视化图表...")

        plt.style.use('seaborn-v0_8')

        # 1. Overview panel: target histogram, severity pie, boxplots, Q-Q plot.
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('糖尿病数据集探索性分析', fontsize=16, fontweight='bold')

        # Target-variable histogram
        ax1 = axes[0, 0]
        ax1.hist(self.df['target'], bins=30, alpha=0.7, color='skyblue', edgecolor='black')
        ax1.set_xlabel('疾病严重程度')
        ax1.set_ylabel('频次')
        ax1.set_title('目标变量分布')
        ax1.grid(True, alpha=0.3)

        # Severity-category pie chart
        ax2 = axes[0, 1]
        severity_counts = self.df['severity_category'].value_counts()
        colors = ['lightgreen', 'gold', 'lightcoral']
        ax2.pie(severity_counts.values, labels=severity_counts.index, autopct='%1.1f%%',
                colors=colors, startangle=90)
        ax2.set_title('严重程度分类分布')

        # Per-feature boxplots
        ax3 = axes[1, 0]
        feature_data = self.df[self.feature_names].values
        ax3.boxplot(feature_data, labels=self.feature_names)
        ax3.set_xlabel('特征')
        ax3.set_ylabel('标准化值')
        ax3.set_title('特征分布箱线图')
        ax3.tick_params(axis='x', rotation=45)
        ax3.grid(True, alpha=0.3)

        # Normal Q-Q plot of the target
        ax4 = axes[1, 1]
        stats.probplot(self.df['target'], dist="norm", plot=ax4)
        ax4.set_title('目标变量正态性Q-Q图')
        ax4.grid(True, alpha=0.3)

        plt.tight_layout()

        # Ensure the output directory exists before the first save.
        import os
        os.makedirs('../results', exist_ok=True)
        plt.savefig('../results/eda_overview.png', dpi=300, bbox_inches='tight')
        plt.close(fig)  # release the figure once it is on disk

        # 2. Correlation heatmap (lower triangle only; the matrix is symmetric).
        heatmap_fig = plt.figure(figsize=(12, 10))
        corr_matrix = self.df[self.feature_names + ['target']].corr()

        # Mask the redundant upper triangle.
        mask = np.triu(np.ones_like(corr_matrix, dtype=bool))

        sns.heatmap(corr_matrix, mask=mask, annot=True, cmap='RdBu_r', center=0,
                   square=True, linewidths=0.5, cbar_kws={"shrink": .8})
        plt.title('特征相关性热图', fontsize=14, fontweight='bold')
        plt.tight_layout()
        plt.savefig('../results/correlation_heatmap.png', dpi=300, bbox_inches='tight')
        plt.close(heatmap_fig)

        # 3. Feature relevance: target correlations and per-feature variance.
        fig, axes = plt.subplots(2, 1, figsize=(12, 10))

        # Horizontal bars: correlation with the target (red = negative).
        ax1 = axes[0]
        target_corr = corr_matrix['target'].drop('target').sort_values(key=abs, ascending=True)
        colors = ['red' if x < 0 else 'blue' for x in target_corr.values]
        bars = ax1.barh(range(len(target_corr)), target_corr.values, color=colors, alpha=0.7)
        ax1.set_yticks(range(len(target_corr)))
        ax1.set_yticklabels([self.feature_descriptions[name] for name in target_corr.index])
        ax1.set_xlabel('与目标变量的相关系数')
        ax1.set_title('特征与目标变量相关性')
        ax1.grid(True, alpha=0.3)
        ax1.axvline(x=0, color='black', linestyle='-', alpha=0.5)

        # Annotate each bar with its correlation value.
        for i, bar in enumerate(bars):
            width = bar.get_width()
            ax1.text(width + 0.01 if width >= 0 else width - 0.01, bar.get_y() + bar.get_height()/2,
                    f'{width:.3f}', ha='left' if width >= 0 else 'right', va='center')

        # Horizontal bars: per-feature variance.
        ax2 = axes[1]
        feature_variance = self.df[self.feature_names].var().sort_values(ascending=True)
        ax2.barh(range(len(feature_variance)), feature_variance.values, color='green', alpha=0.7)
        ax2.set_yticks(range(len(feature_variance)))
        ax2.set_yticklabels([self.feature_descriptions[name] for name in feature_variance.index])
        ax2.set_xlabel('方差')
        ax2.set_title('特征方差分析')
        ax2.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.savefig('../results/feature_importance.png', dpi=300, bbox_inches='tight')
        plt.close(fig)

        print("✅ 可视化图表已保存至 ../results/ 目录")

    def medical_insights_analysis(self):
        """Print medically oriented findings derived from the correlations."""
        print("\n🏥 医学洞察分析")
        print("=" * 50)

        corr_matrix = self.df[self.feature_names + ['target']].corr()
        target_corr = corr_matrix['target'].drop('target')

        print("📋 关键医学发现:")

        # Top-3 predictors by |correlation| with the target.
        top_predictors = target_corr.abs().nlargest(3)
        print("\n🔍 最重要的预测因子:")
        for i, (feature, corr) in enumerate(top_predictors.items(), 1):
            desc = self.feature_descriptions[feature]
            direction = "正相关" if target_corr[feature] > 0 else "负相关"
            print(f"{i}. {desc} ({direction}, r={target_corr[feature]:.3f})")

        # Features conventionally tied to metabolic syndrome:
        # BMI, blood pressure, cholesterol, triglycerides, blood sugar.
        metabolic_features = ['bmi', 'bp', 's1', 's4', 's5']
        print("\n🩺 代谢综合征指标相关性:")
        for feature in metabolic_features:
            corr = target_corr[feature]
            desc = self.feature_descriptions[feature]
            # Risk tiers are heuristic thresholds on |r|.
            risk_level = "高风险" if abs(corr) > 0.3 else "中风险" if abs(corr) > 0.2 else "低风险"
            print(f"- {desc}: {corr:.3f} ({risk_level})")

        # Demographic factors
        print("\n👥 人口统计学因素:")
        age_corr = target_corr['age']
        sex_corr = target_corr['sex']
        print(f"- 年龄影响: {age_corr:.3f} ({'随年龄增加' if age_corr > 0 else '随年龄减少'})")
        print(f"- 性别影响: {sex_corr:.3f} ({'差异显著' if abs(sex_corr) > 0.1 else '差异较小'})")

        # Risk-stratified monitoring suggestions based on |r| thresholds.
        print("\n⚠️  临床风险分层建议:")
        high_risk_features = [f for f, c in target_corr.items() if abs(c) > 0.3]
        if high_risk_features:
            print("高优先级监测指标:")
            for feature in high_risk_features:
                print(f"- {self.feature_descriptions[feature]}")

        moderate_risk_features = [f for f, c in target_corr.items() if 0.2 < abs(c) <= 0.3]
        if moderate_risk_features:
            print("\n中优先级监测指标:")
            for feature in moderate_risk_features:
                print(f"- {self.feature_descriptions[feature]}")

    def generate_data_report(self):
        """Score data quality on four checks and print modeling advice."""
        print("\n📋 数据质量和建模建议报告")
        print("=" * 50)

        data_quality_score = 0
        total_checks = 4

        # 1. Completeness: no missing values anywhere.
        if self.df.isnull().sum().sum() == 0:
            data_quality_score += 1
            print("✅ 数据完整性: 优秀 (无缺失值)")
        else:
            print("⚠️  数据完整性: 需要处理缺失值")

        # 2. Consistency: no duplicated rows.
        if self.df.duplicated().sum() == 0:
            data_quality_score += 1
            print("✅ 数据一致性: 优秀 (无重复值)")
        else:
            print("⚠️  数据一致性: 存在重复值")

        # 3. Target distribution: |skew| < 1 is considered acceptable.
        target_skew = abs(stats.skew(self.df['target']))
        if target_skew < 1:
            data_quality_score += 1
            print("✅ 目标变量分布: 良好 (偏度适中)")
        else:
            print("⚠️  目标变量分布: 偏度较大，可能需要变换")

        # 4. Multicollinearity: max off-diagonal |r| among features < 0.9.
        corr_matrix = self.df[self.feature_names].corr()
        max_corr = corr_matrix.abs().values[np.triu_indices_from(corr_matrix.values, k=1)].max()
        if max_corr < 0.9:
            data_quality_score += 1
            print("✅ 特征独立性: 良好 (无严重多重共线性)")
        else:
            print("⚠️  特征独立性: 存在高度相关特征")

        # Overall score
        quality_percentage = (data_quality_score / total_checks) * 100
        print(f"\n📊 数据质量总分: {data_quality_score}/{total_checks} ({quality_percentage:.0f}%)")

        # Modeling advice tiered by the quality score.
        print("\n💡 建模建议:")
        if quality_percentage >= 75:
            print("✅ 数据质量良好，可直接进行建模")
        elif quality_percentage >= 50:
            print("⚠️  数据质量中等，建议优化后建模")
        else:
            print("❌ 数据质量需要显著改善")

        print("\n🔧 具体优化建议:")
        print("- 考虑特征工程 (多项式特征、交互特征)")
        print("- 尝试不同的数据变换方法")
        print("- 探索集成学习方法")
        print("- 验证模型在不同子群体的表现")

def main():
    """Run the full EDA pipeline on the diabetes dataset.

    Loads the data, runs every analysis step in order, and saves figures
    under ../results/. Any exception is reported and then re-raised so the
    process exits with a traceback.
    """
    print("🔍 糖尿病数据集探索性分析")
    print("=" * 50)

    analyzer = DiabetesDataAnalyzer()

    try:
        # Each step reads analyzer.df, which load_data() populates first.
        analyzer.load_data()
        analyzer.basic_info_analysis()
        analyzer.data_quality_check()
        analyzer.descriptive_statistics()
        analyzer.correlation_analysis()
        analyzer.distribution_analysis()
        analyzer.create_visualizations()
        analyzer.medical_insights_analysis()
        analyzer.generate_data_report()

        print("\n🎉 探索性数据分析完成！")
        print("📂 图表已保存至 ../results/ 目录")

    except Exception as e:
        # Surface the failure in the console, then re-raise for the caller.
        print(f"❌ 分析过程出错: {str(e)}")
        raise

# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main()