# -*- coding: utf-8 -*-
"""
真实数据集应用示例

本文件展示如何使用NumPy和Pandas处理真实世界的开源数据集，
包含数据加载、清洗、分析和可视化的完整流程。

数据集来源：
1. Iris花卉数据集 - 经典机器学习数据集
2. 泰坦尼克号乘客数据 - 生存预测数据集
3. 波士顿房价数据 - 回归分析数据集
4. 股票价格数据 - 时间序列分析
5. 学生成绩数据 - 教育数据分析
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime, timedelta
import warnings
# NOTE(review): this silences *every* warning (including deprecations);
# acceptable for a demo script, but reconsider for production code.
warnings.filterwarnings('ignore')

# Configure font fallbacks so CJK axis labels render in matplotlib,
# keep minus signs rendering correctly, and set the seaborn theme.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False
sns.set_style("whitegrid")

class IrisDatasetAnalysis:
    """
    Iris flower dataset analysis.

    A classic machine-learning dataset with 150 iris samples, each carrying
    four features: sepal length, sepal width, petal length and petal width.
    """

    @staticmethod
    def create_iris_dataset():
        """
        Create a synthetic Iris dataset (simulating the real data).

        Returns:
            pd.DataFrame: 150 rows x 5 columns ('sepal_length', 'sepal_width',
            'petal_length', 'petal_width', 'species'), 50 rows per species.
        """
        np.random.seed(42)

        # (species, (mean, std) per feature) — rough statistics of the classic
        # dataset. NOTE: the np.random call order below is part of the
        # deterministic output; do not reorder the sampling.
        species_specs = [
            ('setosa', (5.0, 0.4), (3.4, 0.4), (1.5, 0.2), (0.2, 0.1)),
            ('versicolor', (5.9, 0.5), (2.8, 0.3), (4.3, 0.5), (1.3, 0.2)),
            ('virginica', (6.5, 0.6), (3.0, 0.3), (5.5, 0.6), (2.0, 0.3)),
        ]

        # FIX: the original kept a dead `species_data = []` local and merged
        # the three per-species dicts key by key; build per-species frames
        # and concatenate instead (identical values, same RNG draw order).
        frames = []
        for name, sl, sw, pl, pw in species_specs:
            frames.append(pd.DataFrame({
                'sepal_length': np.random.normal(sl[0], sl[1], 50),
                'sepal_width': np.random.normal(sw[0], sw[1], 50),
                'petal_length': np.random.normal(pl[0], pl[1], 50),
                'petal_width': np.random.normal(pw[0], pw[1], 50),
                'species': name,
            }))

        df = pd.concat(frames, ignore_index=True)

        # Normal sampling can (rarely) yield negative measurements; fold
        # them back to positive values.
        feature_cols = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
        df[feature_cols] = df[feature_cols].abs()
        return df

    @staticmethod
    def analyze_iris_dataset():
        """
        Run the full Iris analysis and print a report.

        Covers descriptive statistics, per-species statistics, feature
        correlations, per-species covariance matrices, feature
        standardization (PCA preparation) and IQR-based outlier detection.

        Returns:
            dict: analysis artifacts — dataset, stats tables, correlation
            and covariance matrices, standardized features, outlier counts.
        """
        # Load the data
        df = IrisDatasetAnalysis.create_iris_dataset()

        print("=== Iris花卉数据集分析 ===")
        print(f"数据集形状: {df.shape}")
        print(f"\n数据集基本信息:")
        # FIX: DataFrame.info() prints its report itself and returns None,
        # so the original print(df.info()) emitted a stray "None" line.
        df.info()

        # 1. Descriptive statistics
        print("\n1. 描述性统计:")
        desc_stats = df.describe()
        print(desc_stats)

        # 2. Statistics grouped by species
        print("\n2. 按物种分组的统计:")
        species_stats = df.groupby('species').agg({
            'sepal_length': ['mean', 'std', 'min', 'max'],
            'sepal_width': ['mean', 'std', 'min', 'max'],
            'petal_length': ['mean', 'std', 'min', 'max'],
            'petal_width': ['mean', 'std', 'min', 'max']
        }).round(2)
        print(species_stats)

        # 3. Feature correlation analysis
        print("\n3. 特征相关性分析:")
        numeric_cols = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
        correlation_matrix = df[numeric_cols].corr()
        print(correlation_matrix.round(3))

        # 4. Advanced analysis with NumPy
        print("\n4. NumPy高级分析:")

        # Covariance matrix per species
        species_covariance = {}
        for species in df['species'].unique():
            species_data = df[df['species'] == species][numeric_cols].values
            cov_matrix = np.cov(species_data.T)
            species_covariance[species] = cov_matrix
            print(f"\n{species}的协方差矩阵:")
            print(cov_matrix.round(3))

        # 5. Feature standardization (PCA preparation)
        print("\n5. 特征标准化（为PCA准备）:")
        features = df[numeric_cols].values

        # z-score standardization: zero mean, unit variance per column
        mean_features = np.mean(features, axis=0)
        std_features = np.std(features, axis=0)
        standardized_features = (features - mean_features) / std_features

        print(f"原始特征均值: {mean_features.round(3)}")
        print(f"原始特征标准差: {std_features.round(3)}")
        print(f"标准化后均值: {np.mean(standardized_features, axis=0).round(3)}")
        print(f"标准化后标准差: {np.std(standardized_features, axis=0).round(3)}")

        # 6. Outlier detection via the 1.5*IQR rule
        print("\n6. 异常值检测（使用IQR方法）:")
        outliers_info = {}
        for col in numeric_cols:
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr = q3 - q1
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr

            outliers = df[(df[col] < lower_bound) | (df[col] > upper_bound)]
            outliers_info[col] = len(outliers)
            print(f"{col}: {len(outliers)} 个异常值")

        return {
            'dataset': df,
            'desc_stats': desc_stats,
            'species_stats': species_stats,
            'correlation_matrix': correlation_matrix,
            'species_covariance': species_covariance,
            'standardized_features': standardized_features,
            'outliers_info': outliers_info
        }

class TitanicDatasetAnalysis:
    """
    Titanic passenger dataset analysis.

    Analyzes passenger survival on the Titanic; the (simulated) data carries
    age, sex, ticket class, fare and related passenger attributes.
    """
    
    @staticmethod
    def create_titanic_dataset():
        """
        Create a Titanic dataset (simulated to resemble the real data).

        Survival is sampled from per-passenger probabilities that follow the
        historical patterns (women, first class and children fared better);
        afterwards ~20% of ages are blanked to NaN to mimic missing data.

        NOTE: output is deterministic for a fixed seed and depends on the
        exact order of the np.random calls below — do not reorder them.

        Returns:
            pd.DataFrame: 891 passengers with demographics, fare, embarkation
            port and a generated 'survived' flag (0/1).
        """
        np.random.seed(42)
        n_passengers = 891
        
        # Base passenger attributes; the p= vectors roughly follow the real
        # dataset's marginal distributions.
        data = {
            'passenger_id': range(1, n_passengers + 1),
            'pclass': np.random.choice([1, 2, 3], n_passengers, p=[0.24, 0.21, 0.55]),
            'sex': np.random.choice(['male', 'female'], n_passengers, p=[0.65, 0.35]),
            'age': np.random.normal(30, 12, n_passengers),
            'sibsp': np.random.choice([0, 1, 2, 3, 4], n_passengers, p=[0.68, 0.23, 0.06, 0.02, 0.01]),
            'parch': np.random.choice([0, 1, 2, 3, 4], n_passengers, p=[0.76, 0.13, 0.08, 0.02, 0.01]),
            'fare': np.random.lognormal(2.5, 1.2, n_passengers),
            'embarked': np.random.choice(['C', 'Q', 'S'], n_passengers, p=[0.19, 0.09, 0.72])
        }
        
        df = pd.DataFrame(data)
        
        # Clamp ages into a plausible range
        df['age'] = np.clip(df['age'], 0.5, 80)
        
        # Scale fares up for the better ticket classes
        df.loc[df['pclass'] == 1, 'fare'] *= 3
        df.loc[df['pclass'] == 2, 'fare'] *= 1.5
        
        # Build per-passenger survival probabilities (following historical
        # survival patterns)
        survival_prob = np.zeros(n_passengers)
        
        # Women survived at a higher rate
        survival_prob[df['sex'] == 'female'] += 0.6
        survival_prob[df['sex'] == 'male'] += 0.2
        
        # First class survived at a higher rate
        survival_prob[df['pclass'] == 1] += 0.3
        survival_prob[df['pclass'] == 2] += 0.1
        
        # Children survived at a higher rate
        survival_prob[df['age'] < 16] += 0.2
        
        # Keep probabilities within [0, 1] (the boosts above can sum past 1)
        survival_prob = np.clip(survival_prob, 0, 1)
        
        # Sample the survival outcome
        df['survived'] = np.random.binomial(1, survival_prob)
        
        # Blank out ~20% of ages (after survival generation) to simulate
        # real-world missing data
        missing_age_indices = np.random.choice(df.index, size=int(0.2 * len(df)), replace=False)
        df.loc[missing_age_indices, 'age'] = np.nan
        
        return df
    
    @staticmethod
    def analyze_titanic_dataset():
        """
        Run the full Titanic analysis and print a report.

        Covers overall/grouped survival rates, age and fare statistics,
        family-size effects, a survival-rate confidence interval and a
        sex-by-class cross-tabulation.

        Returns:
            dict: analysis artifacts keyed by name (dataset, survival
            tables, cross-tabulation).
        """
        # Load the data
        df = TitanicDatasetAnalysis.create_titanic_dataset()
        
        print("\n=== 泰坦尼克号数据集分析 ===")
        print(f"数据集形状: {df.shape}")
        
        # 1. Overall survival rate
        print("\n1. 总体生存率:")
        overall_survival = df['survived'].mean()
        print(f"总体生存率: {overall_survival:.2%}")
        
        # 2. Survival rate by sex
        print("\n2. 按性别的生存率:")
        gender_survival = df.groupby('sex')['survived'].agg(['count', 'sum', 'mean'])
        gender_survival.columns = ['总人数', '生存人数', '生存率']
        # NOTE: formatting converts the rate column to strings for display
        gender_survival['生存率'] = gender_survival['生存率'].apply(lambda x: f"{x:.2%}")
        print(gender_survival)
        
        # 3. Survival rate by ticket class
        print("\n3. 按船票等级的生存率:")
        class_survival = df.groupby('pclass')['survived'].agg(['count', 'sum', 'mean'])
        class_survival.columns = ['总人数', '生存人数', '生存率']
        class_survival['生存率'] = class_survival['生存率'].apply(lambda x: f"{x:.2%}")
        print(class_survival)
        
        # 4. Age analysis
        print("\n4. 年龄分析:")
        print(f"平均年龄: {df['age'].mean():.1f}岁")
        print(f"年龄中位数: {df['age'].median():.1f}岁")
        print(f"缺失年龄数据: {df['age'].isnull().sum()}人 ({df['age'].isnull().mean():.1%})")
        
        # Survival rate by age group (NaN ages fall outside all bins)
        df['age_group'] = pd.cut(df['age'], bins=[0, 16, 30, 50, 80], 
                                labels=['儿童(0-16)', '青年(17-30)', '中年(31-50)', '老年(51+)'])
        age_survival = df.groupby('age_group')['survived'].agg(['count', 'mean'])
        age_survival.columns = ['人数', '生存率']
        age_survival['生存率'] = age_survival['生存率'].apply(lambda x: f"{x:.2%}")
        print("\n按年龄组的生存率:")
        print(age_survival)
        
        # 5. Fare analysis
        print("\n5. 票价分析:")
        fare_stats = df['fare'].describe()
        print(fare_stats)
        
        # 6. Family-size analysis (siblings/spouses + parents/children + self)
        print("\n6. 家庭规模分析:")
        df['family_size'] = df['sibsp'] + df['parch'] + 1
        df['family_type'] = pd.cut(df['family_size'], bins=[0, 1, 4, 20], 
                                  labels=['独自一人', '小家庭(2-4人)', '大家庭(5+人)'])
        
        family_survival = df.groupby('family_type')['survived'].agg(['count', 'mean'])
        family_survival.columns = ['人数', '生存率']
        family_survival['生存率'] = family_survival['生存率'].apply(lambda x: f"{x:.2%}")
        print(family_survival)
        
        # 7. Advanced analysis with NumPy
        print("\n7. NumPy高级分析:")
        
        # Normal-approximation 95% confidence interval for the survival rate
        survived_array = df['survived'].values
        n = len(survived_array)
        p = np.mean(survived_array)
        se = np.sqrt(p * (1 - p) / n)
        confidence_interval = (p - 1.96 * se, p + 1.96 * se)
        print(f"生存率95%置信区间: [{confidence_interval[0]:.3f}, {confidence_interval[1]:.3f}]")
        
        # 8. Cross-tabulation of sex and class vs. survival (row-normalized)
        print("\n8. 性别和船票等级的交叉分析:")
        cross_analysis = pd.crosstab([df['sex'], df['pclass']], df['survived'], 
                                   margins=True, normalize='index')
        print(cross_analysis.round(3))
        
        return {
            'dataset': df,
            'overall_survival': overall_survival,
            'gender_survival': gender_survival,
            'class_survival': class_survival,
            'age_survival': age_survival,
            'family_survival': family_survival,
            'cross_analysis': cross_analysis
        }

class StockPriceAnalysis:
    """
    Stock price time-series analysis.

    Analyzes simulated daily OHLCV data for several tickers: price trends,
    volatility, technical indicators, portfolio risk/return and VaR.
    """

    @staticmethod
    def create_stock_dataset():
        """
        Create a simulated stock price dataset.

        Close prices follow a (discrete) geometric-Brownian-motion style
        compounding of normal daily returns; open/high/low and volume are
        derived around each close.

        Returns:
            pd.DataFrame: one row per (trading day, symbol) with columns
            date, symbol, open, high, low, close, volume — sorted by
            symbol then date.
        """
        np.random.seed(42)

        # Trading calendar: daily range with weekends dropped (holidays are
        # ignored for simplicity).
        start_date = '2022-01-01'
        end_date = '2023-12-31'
        dates = pd.date_range(start=start_date, end=end_date, freq='D')
        dates = dates[dates.weekday < 5]

        stocks = ['AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA']
        stock_data = []

        for stock in stocks:
            n_days = len(dates)

            # Per-ticker parameters: (initial price, daily drift, daily vol)
            if stock == 'AAPL':
                initial_price, drift, volatility = 150, 0.0008, 0.02
            elif stock == 'GOOGL':
                initial_price, drift, volatility = 2500, 0.0006, 0.025
            elif stock == 'MSFT':
                initial_price, drift, volatility = 300, 0.0007, 0.022
            elif stock == 'AMZN':
                initial_price, drift, volatility = 3200, 0.0005, 0.03
            else:  # TSLA
                initial_price, drift, volatility = 800, 0.001, 0.04

            # Compound daily returns into close prices, floored at 1 to keep
            # them positive.
            returns = np.random.normal(drift, volatility, n_days)
            prices = [initial_price]
            for ret in returns:
                prices.append(max(prices[-1] * (1 + ret), 1))
            prices = prices[1:]  # drop the synthetic seed price

            # Volume: base level scaled up on days with large log-return moves
            price_changes = np.diff(np.log(prices))
            base_volume = np.random.normal(1000000, 200000, n_days)
            volume_multiplier = 1 + np.abs(np.concatenate([[0], price_changes])) * 5
            volumes = np.maximum(base_volume * volume_multiplier, 100000)

            # Derive open/high/low around each close.
            # FIX: the original drew an unused `daily_volatility` random value
            # every iteration (dead code wasting RNG draws) — removed.
            for i, (date, price, volume) in enumerate(zip(dates, prices, volumes)):
                # Open gaps off the previous close (or near the first close)
                if i == 0:
                    open_price = price * np.random.uniform(0.99, 1.01)
                else:
                    open_price = prices[i - 1] * np.random.uniform(0.98, 1.02)

                # High/low bracket the open/close range
                high_price = max(open_price, price) * np.random.uniform(1.0, 1.02)
                low_price = min(open_price, price) * np.random.uniform(0.98, 1.0)

                stock_data.append({
                    'date': date,
                    'symbol': stock,
                    'open': round(open_price, 2),
                    'high': round(high_price, 2),
                    'low': round(low_price, 2),
                    'close': round(price, 2),
                    'volume': int(volume)
                })

        df = pd.DataFrame(stock_data)
        df = df.sort_values(['symbol', 'date']).reset_index(drop=True)

        return df

    @staticmethod
    def analyze_stock_dataset():
        """
        Run the full stock analysis and print a report.

        Computes per-symbol technical indicators, performance statistics,
        cross-asset correlations, an equal-weight portfolio summary,
        historical VaR and a 30-day trend snapshot.

        Returns:
            dict: analysis artifacts keyed by name.
        """
        # Load the data
        df = StockPriceAnalysis.create_stock_dataset()

        print("\n=== 股票价格数据集分析 ===")
        print(f"数据集形状: {df.shape}")
        print(f"股票数量: {df['symbol'].nunique()}")
        print(f"时间范围: {df['date'].min()} 到 {df['date'].max()}")

        # 1. Technical indicators
        print("\n1. 计算技术指标:")

        def calculate_technical_indicators(group):
            """Add indicator columns (returns, MAs, Bollinger, RSI, vol) to one symbol's frame."""
            # Daily simple return
            group['daily_return'] = group['close'].pct_change()

            # Moving averages
            group['ma_5'] = group['close'].rolling(window=5).mean()
            group['ma_20'] = group['close'].rolling(window=20).mean()
            group['ma_50'] = group['close'].rolling(window=50).mean()

            # Bollinger bands (20-day, 2 standard deviations)
            group['bb_middle'] = group['close'].rolling(window=20).mean()
            bb_std = group['close'].rolling(window=20).std()
            group['bb_upper'] = group['bb_middle'] + 2 * bb_std
            group['bb_lower'] = group['bb_middle'] - 2 * bb_std

            # RSI (relative strength index, 14-day simple-mean variant)
            delta = group['close'].diff()
            gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
            loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
            rs = gain / loss
            group['rsi'] = 100 - (100 / (1 + rs))

            # Annualized volatility (20-day rolling std of daily returns)
            group['volatility'] = group['daily_return'].rolling(window=20).std() * np.sqrt(252)

            return group

        # FIX: groupby(...).apply(fn) on a frame that still contains the
        # grouping column is deprecated in pandas (future behavior drops
        # 'symbol' from the result, which later sections read); build the
        # result explicitly per group instead.
        df_with_indicators = pd.concat(
            [calculate_technical_indicators(group.copy())
             for _, group in df.groupby('symbol')]
        ).reset_index(drop=True)

        # 2. Per-symbol performance statistics
        print("\n2. 股票表现统计:")
        performance_stats = df_with_indicators.groupby('symbol').agg({
            'close': ['first', 'last'],
            'daily_return': ['mean', 'std'],
            'volume': 'mean',
            'volatility': 'mean'
        }).round(4)

        # Total return over the whole period, in percent
        performance_stats[('total_return', '')] = (performance_stats[('close', 'last')] / 
                                                  performance_stats[('close', 'first')] - 1) * 100

        # Annualized return (252 trading days), in percent
        performance_stats[('annual_return', '')] = performance_stats[('daily_return', 'mean')] * 252 * 100

        # Sharpe ratio (assuming a 2% risk-free rate)
        risk_free_rate = 0.02
        performance_stats[('sharpe_ratio', '')] = ((performance_stats[('daily_return', 'mean')] * 252 - risk_free_rate) / 
                                                  (performance_stats[('daily_return', 'std')] * np.sqrt(252)))

        print(performance_stats)

        # 3. Cross-asset price correlations
        print("\n3. 股票价格相关性分析:")
        price_pivot = df.pivot(index='date', columns='symbol', values='close')
        correlation_matrix = price_pivot.corr()
        print(correlation_matrix.round(3))

        # 4. Advanced analysis with NumPy
        print("\n4. NumPy高级分析:")

        # Portfolio risk/return from the per-asset daily return matrix
        returns_matrix = price_pivot.pct_change().dropna().values

        # Equal-weight portfolio across the five tickers
        equal_weights = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
        portfolio_returns = np.dot(returns_matrix, equal_weights)

        portfolio_stats = {
            'annual_return': np.mean(portfolio_returns) * 252,
            'annual_volatility': np.std(portfolio_returns) * np.sqrt(252),
            'sharpe_ratio': (np.mean(portfolio_returns) * 252 - risk_free_rate) / (np.std(portfolio_returns) * np.sqrt(252)),
            # Max drawdown: worst peak-to-trough ratio of the cumulative curve
            'max_drawdown': np.min(np.minimum.accumulate(np.cumprod(1 + portfolio_returns)) / np.maximum.accumulate(np.cumprod(1 + portfolio_returns)) - 1)
        }

        print("等权重投资组合统计:")
        for key, value in portfolio_stats.items():
            if 'return' in key or 'volatility' in key:
                print(f"{key}: {value:.2%}")
            else:
                print(f"{key}: {value:.4f}")

        # 5. Historical Value-at-Risk (daily-return percentiles)
        print("\n5. 风险价值(VaR)分析:")
        var_95 = {}
        var_99 = {}

        for symbol in df['symbol'].unique():
            symbol_returns = df_with_indicators[df_with_indicators['symbol'] == symbol]['daily_return'].dropna()
            var_95[symbol] = np.percentile(symbol_returns, 5)
            var_99[symbol] = np.percentile(symbol_returns, 1)

        var_df = pd.DataFrame({
            'VaR_95%': var_95,
            'VaR_99%': var_99
        })
        print(var_df.round(4))

        # 6. Trend snapshot over the last 30 trading days per symbol
        print("\n6. 趋势分析（最近30天）:")
        recent_data = df_with_indicators.groupby('symbol').tail(30)
        trend_analysis = recent_data.groupby('symbol').agg({
            'close': ['first', 'last'],
            'ma_20': 'last',
            'rsi': 'last'
        })

        trend_analysis[('trend', '')] = ((trend_analysis[('close', 'last')] / 
                                        trend_analysis[('close', 'first')] - 1) * 100)

        print(trend_analysis.round(2))

        return {
            'dataset': df_with_indicators,
            'performance_stats': performance_stats,
            'correlation_matrix': correlation_matrix,
            'portfolio_stats': portfolio_stats,
            'var_analysis': var_df,
            'trend_analysis': trend_analysis
        }

class EducationDataAnalysis:
    """
    Education data analysis.

    Analyzes student grades, attendance, study habits and other
    education-related (simulated) data.
    """
    
    @staticmethod
    def create_education_dataset():
        """
        Create a simulated education dataset.

        Per-subject scores are built from a base level plus additive effects
        for grade level, study time, attendance, family income, parent
        education and extracurricular load, with random noise; ~5% of
        study-hour values are then blanked to NaN.

        NOTE: output is deterministic for a fixed seed and depends on the
        exact order of the np.random calls below — do not reorder them.

        Returns:
            pd.DataFrame: one row per student with demographics, behaviour
            metrics, per-subject scores and total/average score columns.
        """
        np.random.seed(42)
        n_students = 1000
        
        # Basic student attributes
        data = {
            'student_id': range(1, n_students + 1),
            'grade': np.random.choice([9, 10, 11, 12], n_students, p=[0.25, 0.25, 0.25, 0.25]),
            'gender': np.random.choice(['Male', 'Female'], n_students, p=[0.52, 0.48]),
            'age': np.random.randint(14, 19, n_students),
            'family_income': np.random.choice(['Low', 'Medium', 'High'], n_students, p=[0.3, 0.5, 0.2]),
            'parent_education': np.random.choice(['High School', 'Bachelor', 'Master', 'PhD'], 
                                               n_students, p=[0.4, 0.35, 0.2, 0.05]),
            'study_hours_per_week': np.random.normal(15, 5, n_students),
            'attendance_rate': np.random.beta(8, 2, n_students),  # skewed toward high attendance
            'extracurricular_activities': np.random.randint(0, 6, n_students)
        }
        
        df = pd.DataFrame(data)
        
        # Clamp values into sensible ranges
        df['study_hours_per_week'] = np.clip(df['study_hours_per_week'], 0, 50)
        df['attendance_rate'] = np.clip(df['attendance_rate'], 0.5, 1.0)
        
        # Generate per-subject scores from several contributing factors
        subjects = ['Math', 'English', 'Science', 'History', 'Art']
        
        for subject in subjects:
            # Base score
            base_score = np.random.normal(75, 10, n_students)
            
            # Grade-level effect (upper grades are slightly harder)
            grade_effect = (12 - df['grade']) * 2
            
            # Study-time effect
            study_effect = (df['study_hours_per_week'] - 15) * 0.5
            
            # Attendance effect
            attendance_effect = (df['attendance_rate'] - 0.8) * 25
            
            # Family-income effect
            income_effect = np.where(df['family_income'] == 'High', 5,
                                   np.where(df['family_income'] == 'Medium', 2, -2))
            
            # Parent-education effect
            parent_edu_effect = np.where(df['parent_education'] == 'PhD', 8,
                                       np.where(df['parent_education'] == 'Master', 5,
                                              np.where(df['parent_education'] == 'Bachelor', 2, 0)))
            
            # Extracurricular effect (helpful in moderation, harmful past 3)
            activity_effect = np.where(df['extracurricular_activities'] <= 3, 
                                     df['extracurricular_activities'] * 1.5,
                                     3 * 1.5 - (df['extracurricular_activities'] - 3) * 0.5)
            
            # Combine into the final score
            final_score = (base_score + grade_effect + study_effect + 
                          attendance_effect + income_effect + 
                          parent_edu_effect + activity_effect)
            
            # Add some random noise
            final_score += np.random.normal(0, 3, n_students)
            
            # Keep scores within 0-100
            df[f'{subject}_score'] = np.clip(final_score, 0, 100)
        
        # Total and average scores across all subjects
        score_columns = [f'{subject}_score' for subject in subjects]
        df['total_score'] = df[score_columns].sum(axis=1)
        df['average_score'] = df[score_columns].mean(axis=1)
        
        # Blank out ~5% of study-hour values to simulate missing data
        missing_indices = np.random.choice(df.index, size=int(0.05 * len(df)), replace=False)
        df.loc[missing_indices, 'study_hours_per_week'] = np.nan
        
        return df
    
    @staticmethod
    def analyze_education_dataset():
        """
        Run the full education analysis and print a report.

        Covers score distributions, grade/gender/family-background breakdowns,
        study-behaviour effects, subject correlations, a PCA-style eigenvalue
        decomposition and IQR-based outlier detection.

        Returns:
            dict: analysis artifacts keyed by name.
        """
        # Load the data
        df = EducationDataAnalysis.create_education_dataset()
        
        print("\n=== 教育数据集分析 ===")
        print(f"数据集形状: {df.shape}")
        print(f"学生数量: {len(df)}")
        
        # 1. Basic statistics
        print("\n1. 基本统计信息:")
        print(f"平均年龄: {df['age'].mean():.1f}岁")
        print(f"平均学习时间: {df['study_hours_per_week'].mean():.1f}小时/周")
        print(f"平均出勤率: {df['attendance_rate'].mean():.1%}")
        print(f"平均总分: {df['average_score'].mean():.1f}分")
        
        # 2. Score distributions per subject
        print("\n2. 成绩分布分析:")
        subjects = ['Math', 'English', 'Science', 'History', 'Art']
        score_stats = df[[f'{subject}_score' for subject in subjects]].describe()
        print(score_stats.round(2))
        
        # 3. Breakdown by grade level
        print("\n3. 按年级分析:")
        grade_analysis = df.groupby('grade').agg({
            'average_score': ['mean', 'std', 'count'],
            'study_hours_per_week': 'mean',
            'attendance_rate': 'mean'
        }).round(2)
        print(grade_analysis)
        
        # 4. Breakdown by gender
        print("\n4. 按性别分析:")
        gender_analysis = df.groupby('gender').agg({
            'average_score': ['mean', 'std'],
            'Math_score': 'mean',
            'English_score': 'mean',
            'Science_score': 'mean'
        }).round(2)
        print(gender_analysis)
        
        # 5. Family-background effects
        print("\n5. 家庭背景影响分析:")
        
        # Effect of family income
        income_analysis = df.groupby('family_income')['average_score'].agg(['mean', 'std', 'count'])
        print("\n按家庭收入:")
        print(income_analysis.round(2))
        
        # Effect of parent education
        parent_edu_analysis = df.groupby('parent_education')['average_score'].agg(['mean', 'std', 'count'])
        print("\n按父母教育水平:")
        print(parent_edu_analysis.round(2))
        
        # 6. Study-behaviour analysis
        print("\n6. 学习行为分析:")
        
        # Relationship between study time and scores
        df['study_time_group'] = pd.cut(df['study_hours_per_week'], 
                                       bins=[0, 10, 20, 30, 50], 
                                       labels=['<10h', '10-20h', '20-30h', '>30h'])
        
        study_time_analysis = df.groupby('study_time_group')['average_score'].agg(['mean', 'count'])
        print("\n按学习时间分组:")
        print(study_time_analysis.round(2))
        
        # Relationship between attendance and scores
        df['attendance_group'] = pd.cut(df['attendance_rate'], 
                                       bins=[0, 0.8, 0.9, 0.95, 1.0], 
                                       labels=['<80%', '80-90%', '90-95%', '>95%'])
        
        attendance_analysis = df.groupby('attendance_group')['average_score'].agg(['mean', 'count'])
        print("\n按出勤率分组:")
        print(attendance_analysis.round(2))
        
        # 7. Advanced analysis with NumPy
        print("\n7. NumPy高级分析:")
        
        # Pairwise correlations between subjects
        score_columns = [f'{subject}_score' for subject in subjects]
        score_matrix = df[score_columns].values
        correlation_matrix = np.corrcoef(score_matrix.T)
        
        print("\n科目相关性矩阵:")
        corr_df = pd.DataFrame(correlation_matrix, 
                              index=subjects, 
                              columns=subjects)
        print(corr_df.round(3))
        
        # PCA preparation:
        # standardize the score matrix (zero mean, unit variance per subject)
        standardized_scores = (score_matrix - np.mean(score_matrix, axis=0)) / np.std(score_matrix, axis=0)
        
        # Covariance matrix of the standardized scores
        cov_matrix = np.cov(standardized_scores.T)
        
        # Eigen-decomposition
        eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
        
        # Proportion of variance explained per component
        explained_variance_ratio = eigenvalues / np.sum(eigenvalues)
        
        print("\n主成分分析结果:")
        for i, ratio in enumerate(explained_variance_ratio):
            print(f"主成分{i+1}解释方差比例: {ratio:.3f}")
        
        # 8. Score-predictor analysis
        print("\n8. 成绩预测因子分析:")
        
        # Correlation of each behavioural factor with the average score
        factors = ['study_hours_per_week', 'attendance_rate', 'extracurricular_activities']
        correlations = {}
        
        for factor in factors:
            if factor in df.columns:
                # NaNs are mean-imputed before computing the correlation
                corr = np.corrcoef(df[factor].fillna(df[factor].mean()), df['average_score'])[0, 1]
                correlations[factor] = corr
        
        print("与平均成绩的相关性:")
        for factor, corr in correlations.items():
            print(f"{factor}: {corr:.3f}")
        
        # 9. Outlier detection
        print("\n9. 异常值检测:")
        
        # Detect average-score outliers via the 1.5*IQR rule
        Q1 = df['average_score'].quantile(0.25)
        Q3 = df['average_score'].quantile(0.75)
        IQR = Q3 - Q1
        lower_bound = Q1 - 1.5 * IQR
        upper_bound = Q3 + 1.5 * IQR
        
        outliers = df[(df['average_score'] < lower_bound) | (df['average_score'] > upper_bound)]
        print(f"成绩异常值数量: {len(outliers)} ({len(outliers)/len(df):.1%})")
        
        if len(outliers) > 0:
            print("\n异常值学生特征:")
            outlier_analysis = outliers.groupby('grade').agg({
                'average_score': ['mean', 'count'],
                'study_hours_per_week': 'mean',
                'attendance_rate': 'mean'
            })
            print(outlier_analysis.round(2))
        
        return {
            'dataset': df,
            'score_stats': score_stats,
            'grade_analysis': grade_analysis,
            'gender_analysis': gender_analysis,
            'income_analysis': income_analysis,
            'parent_edu_analysis': parent_edu_analysis,
            'correlation_matrix': corr_df,
            'explained_variance_ratio': explained_variance_ratio,
            'factor_correlations': correlations,
            'outliers': outliers
        }

def run_all_analyses():
    """
    Run every dataset analysis in sequence and collect the results.

    Returns:
        dict: analysis results keyed by dataset name
        ('iris', 'titanic', 'stock', 'education').
    """
    banner = "=" * 80
    print(banner)
    print("真实数据集应用示例 - 完整分析")
    print(banner)

    # Pair each result key with its analysis entry point; execution order
    # only affects the order of the printed reports.
    analyses = [
        ('iris', IrisDatasetAnalysis.analyze_iris_dataset),
        ('titanic', TitanicDatasetAnalysis.analyze_titanic_dataset),
        ('stock', StockPriceAnalysis.analyze_stock_dataset),
        ('education', EducationDataAnalysis.analyze_education_dataset),
    ]
    results = {key: run() for key, run in analyses}

    print("\n" + banner)
    print("🎉 所有数据集分析完成！")
    print(banner)

    return results

def demonstrate_key_insights():
    """
    Print the key insights and learning takeaways from the demo analyses.

    Output is a banner followed by four titled bullet sections; everything
    goes to stdout and nothing is returned.
    """
    rule = "=" * 80
    for line in ("\n" + rule, "📊 关键洞察和学习要点", rule):
        print(line)

    sections = (
        ("\n🔍 NumPy在数据分析中的应用:", (
            "- 高效的数值计算和数组操作",
            "- 统计分析：均值、标准差、相关性",
            "- 线性代数：特征值、特征向量、矩阵运算",
            "- 随机数生成和概率分布",
            "- 数据标准化和归一化",
        )),
        ("\n📈 Pandas在数据处理中的优势:", (
            "- 灵活的数据结构：DataFrame和Series",
            "- 强大的数据清洗和预处理功能",
            "- 便捷的分组聚合操作",
            "- 时间序列数据处理",
            "- 数据透视和交叉分析",
        )),
        ("\n🎯 实际应用场景:", (
            "- 金融：股票分析、风险管理、投资组合优化",
            "- 教育：学生成绩分析、教学效果评估",
            "- 商业：客户行为分析、销售预测",
            "- 科研：实验数据分析、模式识别",
        )),
        ("\n💡 最佳实践:", (
            "- 数据质量检查：缺失值、异常值、重复值",
            "- 探索性数据分析：描述统计、可视化",
            "- 特征工程：创建新特征、数据变换",
            "- 性能优化：向量化操作、内存管理",
            "- 结果验证：交叉验证、统计显著性检验",
        )),
    )
    for title, bullets in sections:
        print(title)
        for bullet in bullets:
            print(bullet)

if __name__ == "__main__":
    # 运行所有分析
    results = run_all_analyses()
    
    # 展示关键洞察
    demonstrate_key_insights()
    
    print("\n" + "=" * 80)
    print("✅ 真实数据集应用示例演示完成！")
    print("=" * 80)