"""
COM6004 数据挖掘项目 - Python 模板代码
作者：[你的名字]
学号：[你的学号]
项目：[项目标题]
"""

# ==================== 1. 导入必要的库 ====================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import (accuracy_score, precision_score, recall_score, 
                             f1_score, confusion_matrix, classification_report)

# Configure a CJK-capable sans-serif font so Chinese axis labels/titles render (optional)
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']  # Mac
# plt.rcParams['font.sans-serif'] = ['SimHei']  # Windows
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correctly with CJK fonts

# Seaborn plotting style used by all figures below
sns.set_style('whitegrid')

print("=" * 60)
print("COM6004 数据挖掘项目")
print("=" * 60)


# ==================== 2. Load data ====================
def load_data(file_path):
    """Load a CSV dataset and report its basic shape.

    Args:
        file_path: Path to the CSV file to read.

    Returns:
        pandas.DataFrame: The dataset exactly as read by ``pd.read_csv``.
    """
    print("\n[步骤 1] 加载数据...")
    df = pd.read_csv(file_path)
    n_samples, n_features = df.shape
    print(f"✓ 数据加载成功！")
    print(f"  - 数据形状: {df.shape}")
    print(f"  - 样本数量: {n_samples}")
    print(f"  - 特征数量: {n_features}")
    return df


# ==================== 3. Exploratory Data Analysis (EDA) ====================
def exploratory_data_analysis(df):
    """Print summary statistics for *df* and save EDA figures to disk.

    Side effects:
        - Prints df.info(), df.describe(), a missing-value table, and the
          dtype distribution.
        - When numeric columns exist, saves 'eda_distributions.png'
          (per-column histograms) and 'eda_correlation.png' (correlation
          heatmap) in the working directory.
        - Closes all open matplotlib figures before returning.

    Args:
        df: Input DataFrame to analyse (not modified).
    """
    print("\n" + "=" * 60)
    print("[步骤 2] 探索性数据分析 (EDA)")
    print("=" * 60)

    # Basic structure: dtypes, non-null counts, memory usage.
    print("\n--- 数据基本信息 ---")
    print(df.info())

    # Summary statistics (numeric columns only by default).
    print("\n--- 数值特征统计描述 ---")
    print(df.describe())

    # Missing values: absolute count and percentage, shown only for
    # columns that actually have missing entries.
    print("\n--- 缺失值检查 ---")
    missing = df.isnull().sum()
    missing_percent = 100 * missing / len(df)
    missing_df = pd.DataFrame({
        '缺失数量': missing,
        '缺失比例(%)': missing_percent
    })
    print(missing_df[missing_df['缺失数量'] > 0])

    # How many columns of each dtype.
    print("\n--- 数据类型分布 ---")
    print(df.dtypes.value_counts())

    print("\n--- 生成可视化图表 ---")
    numeric_cols = df.select_dtypes(include=[np.number]).columns

    if len(numeric_cols) > 0:
        # Histograms of every numeric feature. DataFrame.hist creates its
        # own figure from the figsize argument, so no preceding plt.figure()
        # is needed — the original created a stray empty figure that was
        # only cleaned up by the final close('all').
        df[numeric_cols].hist(bins=30, figsize=(15, 10))
        plt.suptitle('数值特征分布直方图', fontsize=16)
        plt.tight_layout()
        plt.savefig('eda_distributions.png', dpi=300, bbox_inches='tight')
        print("✓ 已保存: eda_distributions.png")

        # Pairwise Pearson correlation heatmap over the numeric columns.
        plt.figure(figsize=(12, 10))
        correlation = df[numeric_cols].corr()
        sns.heatmap(correlation, annot=True, fmt='.2f', cmap='coolwarm',
                   center=0, square=True)
        plt.title('特征相关性热力图', fontsize=16)
        plt.tight_layout()
        plt.savefig('eda_correlation.png', dpi=300, bbox_inches='tight')
        print("✓ 已保存: eda_correlation.png")

    # Free figure memory.
    plt.close('all')


# ==================== 4. Data preprocessing ====================
def data_preprocessing(df, target_column):
    """Impute, encode, split, and scale the dataset.

    Steps:
        1. Fill missing numeric values with the column mean and missing
           categorical (object) values with the column mode.
        2. Label-encode every categorical feature (the target is skipped).
        3. Stratified 80/20 train/test split with a fixed seed.
        4. Standardise features with a StandardScaler fitted on the training
           split only (avoids test-set leakage).

    Args:
        df: Input DataFrame (left unmodified; a copy is processed).
        target_column: Name of the target column in *df*.

    Returns:
        tuple: (X_train_scaled, X_test_scaled, y_train, y_test, scaler) —
        the X values are numpy arrays and *scaler* is the fitted
        StandardScaler.

    NOTE(review): if the target column is itself categorical it is left
    un-encoded here; sklearn classifiers accept string labels, but confirm
    this matches downstream expectations.
    """
    print("\n" + "=" * 60)
    print("[步骤 3] 数据预处理")
    print("=" * 60)

    df_processed = df.copy()

    # 3.1 Missing values.
    print("\n--- 处理缺失值 ---")
    # Numeric columns: mean imputation. Assign the result back instead of
    # calling fillna(inplace=True) on a column selection — that chained
    # form raises FutureWarning in pandas 2.x and stops mutating the frame
    # under copy-on-write (default in pandas 3.0).
    numeric_cols = df_processed.select_dtypes(include=[np.number]).columns
    for col in numeric_cols:
        if df_processed[col].isnull().sum() > 0:
            df_processed[col] = df_processed[col].fillna(df_processed[col].mean())
            print(f"✓ {col}: 用均值填充")

    # Categorical columns: mode imputation (mode()[0] = most frequent value).
    categorical_cols = df_processed.select_dtypes(include=['object']).columns
    for col in categorical_cols:
        if df_processed[col].isnull().sum() > 0:
            df_processed[col] = df_processed[col].fillna(df_processed[col].mode()[0])
            print(f"✓ {col}: 用众数填充")

    # 3.2 Encode categorical features; the target column is excluded.
    print("\n--- 编码分类变量 ---")
    label_encoders = {}  # kept for potential inverse_transform; not returned
    for col in categorical_cols:
        if col != target_column:
            le = LabelEncoder()
            df_processed[col] = le.fit_transform(df_processed[col])
            label_encoders[col] = le
            print(f"✓ {col}: Label Encoding")

    # 3.3 Split into feature matrix and target vector.
    print("\n--- 分离特征和目标变量 ---")
    X = df_processed.drop(columns=[target_column])
    y = df_processed[target_column]
    print(f"✓ 特征矩阵 X: {X.shape}")
    print(f"✓ 目标变量 y: {y.shape}")

    # 3.4 Stratified 80/20 split; fixed random_state for reproducibility.
    print("\n--- 划分训练集和测试集 ---")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )
    print(f"✓ 训练集: {X_train.shape}")
    print(f"✓ 测试集: {X_test.shape}")

    # 3.5 Standardise: fit on the training split only, transform both.
    print("\n--- 特征标准化 ---")
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    print("✓ 标准化完成")

    return X_train_scaled, X_test_scaled, y_train, y_test, scaler


# ==================== 5. Model training and evaluation ====================
def train_and_evaluate_models(X_train, X_test, y_train, y_test):
    """Train six classifiers, compare them, and pick the best by test accuracy.

    For each model this reports train/test accuracy, weighted precision/
    recall/F1 on the test set, and 5-fold cross-validation on the training
    split. A grouped bar chart of the accuracies is saved to
    'model_comparison.png'.

    Args:
        X_train, X_test: (Scaled) feature matrices.
        y_train, y_test: Corresponding target vectors.

    Returns:
        tuple: (results, best_model, best_model_name) where *results* maps
        each model name to its fitted estimator, metrics, and cached test
        predictions.

    NOTE(review): the "best" model is selected by test-set accuracy; a
    stricter protocol would select by CV mean and reserve the test set for
    the final report only.
    """
    print("\n" + "=" * 60)
    print("[步骤 4] 模型训练与评估")
    print("=" * 60)

    # Local imports keep the heavy estimator modules off module import time.
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
    from sklearn.svm import SVC
    from sklearn.linear_model import LogisticRegression
    from sklearn.neighbors import KNeighborsClassifier

    # Candidate models; fixed random_state keeps runs reproducible.
    models = {
        'Logistic Regression': LogisticRegression(random_state=42, max_iter=1000),
        'Decision Tree': DecisionTreeClassifier(random_state=42),
        'Random Forest': RandomForestClassifier(random_state=42, n_estimators=100),
        'SVM': SVC(random_state=42),
        'KNN': KNeighborsClassifier(),
        'Gradient Boosting': GradientBoostingClassifier(random_state=42)
    }

    results = {}

    print("\n--- 训练模型 ---")
    for name, model in models.items():
        print(f"\n训练 {name}...")

        # Fit on the training split.
        model.fit(X_train, y_train)

        # Predictions on both splits (train accuracy reveals overfitting).
        y_pred_train = model.predict(X_train)
        y_pred_test = model.predict(X_test)

        # Weighted averages so binary and multi-class are handled uniformly.
        train_acc = accuracy_score(y_train, y_pred_train)
        test_acc = accuracy_score(y_test, y_pred_test)
        precision = precision_score(y_test, y_pred_test, average='weighted')
        recall = recall_score(y_test, y_pred_test, average='weighted')
        f1 = f1_score(y_test, y_pred_test, average='weighted')

        # 5-fold CV on the training split only (test set stays untouched).
        cv_scores = cross_val_score(model, X_train, y_train, cv=5)

        results[name] = {
            'model': model,
            'train_accuracy': train_acc,
            'test_accuracy': test_acc,
            'precision': precision,
            'recall': recall,
            'f1_score': f1,
            'cv_mean': cv_scores.mean(),
            'cv_std': cv_scores.std(),
            'predictions': y_pred_test
        }

        print(f"  训练准确率: {train_acc:.4f}")
        print(f"  测试准确率: {test_acc:.4f}")
        print(f"  交叉验证: {cv_scores.mean():.4f} (+/- {cv_scores.std():.4f})")

    # Side-by-side comparison table.
    print("\n" + "=" * 60)
    print("模型性能对比")
    print("=" * 60)

    comparison_df = pd.DataFrame({
        '模型': list(results.keys()),
        '训练准确率': [results[m]['train_accuracy'] for m in results],
        '测试准确率': [results[m]['test_accuracy'] for m in results],
        'Precision': [results[m]['precision'] for m in results],
        'Recall': [results[m]['recall'] for m in results],
        'F1-Score': [results[m]['f1_score'] for m in results],
        'CV均值': [results[m]['cv_mean'] for m in results],
    })

    print(comparison_df.to_string(index=False))

    # Grouped bar chart: train vs. test accuracy per model.
    plt.figure(figsize=(12, 6))
    x = np.arange(len(results))
    width = 0.35

    plt.bar(x - width/2, comparison_df['训练准确率'], width, label='训练准确率', alpha=0.8)
    plt.bar(x + width/2, comparison_df['测试准确率'], width, label='测试准确率', alpha=0.8)

    plt.xlabel('模型')
    plt.ylabel('准确率')
    plt.title('模型性能对比')
    plt.xticks(x, comparison_df['模型'], rotation=45, ha='right')
    plt.legend()
    plt.tight_layout()
    plt.savefig('model_comparison.png', dpi=300, bbox_inches='tight')
    print("\n✓ 已保存: model_comparison.png")
    # Close the figure now that it is saved; the original left it open,
    # relying on an unrelated later function to clean it up.
    plt.close('all')

    # Winner = highest test accuracy (see NOTE in docstring).
    best_model_name = comparison_df.loc[comparison_df['测试准确率'].idxmax(), '模型']
    best_model = results[best_model_name]['model']

    print(f"\n🏆 最佳模型: {best_model_name}")
    print(f"   测试准确率: {results[best_model_name]['test_accuracy']:.4f}")

    return results, best_model, best_model_name


# ==================== 6. Detailed evaluation of the best model ====================
def detailed_evaluation(best_model, best_model_name, X_test, y_test, results):
    """Report confusion matrix, classification report, and (when available)
    feature importances for the winning model.

    Saves 'confusion_matrix.png' and, for estimators exposing
    ``feature_importances_``, 'feature_importance.png'. All figures are
    closed before returning.

    Args:
        best_model: The fitted estimator that won the comparison.
        best_model_name: Display name of that estimator.
        X_test, y_test: Held-out evaluation data.
        results: Per-model results dict from train_and_evaluate_models;
            cached test predictions are reused from here.
    """
    banner = "=" * 60
    print("\n" + banner)
    print(f"[步骤 5] 详细评估最佳模型: {best_model_name}")
    print(banner)

    # Reuse the predictions computed during model comparison.
    y_pred = results[best_model_name]['predictions']

    # Confusion matrix: printed, then saved as an annotated heatmap.
    print("\n--- 混淆矩阵 ---")
    conf_mat = confusion_matrix(y_test, y_pred)
    print(conf_mat)

    plt.figure(figsize=(8, 6))
    sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues')
    plt.title(f'混淆矩阵 - {best_model_name}')
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.tight_layout()
    plt.savefig('confusion_matrix.png', dpi=300, bbox_inches='tight')
    print("✓ 已保存: confusion_matrix.png")

    # Per-class precision / recall / F1.
    print("\n--- 分类报告 ---")
    print(classification_report(y_test, y_pred))

    # Feature importances exist only for tree-based estimators.
    if hasattr(best_model, 'feature_importances_'):
        print("\n--- 特征重要性 ---")
        imp = best_model.feature_importances_
        order = np.argsort(imp)[::-1]  # indices sorted by descending importance

        plt.figure(figsize=(10, 6))
        plt.bar(range(len(imp)), imp[order])
        plt.title(f'特征重要性 - {best_model_name}')
        plt.xlabel('特征索引')
        plt.ylabel('重要性')
        plt.tight_layout()
        plt.savefig('feature_importance.png', dpi=300, bbox_inches='tight')
        print("✓ 已保存: feature_importance.png")

        # Top-10 features by importance.
        print("\n前10个重要特征:")
        for rank, feat_idx in enumerate(order[:10], start=1):
            print(f"  {rank}. 特征 {feat_idx}: {imp[feat_idx]:.4f}")

    plt.close('all')


# ==================== 7. Main ====================
def main():
    """Run the full data-mining pipeline end to end.

    Edit DATA_FILE / TARGET_COLUMN below before running. Any exception is
    caught at this top level, printed with its traceback, and swallowed so
    the script exits cleanly.
    """
    DATA_FILE = 'your_dataset.csv'  # replace with your data file
    TARGET_COLUMN = 'target'  # replace with your target column name

    try:
        # 1) Load.
        df = load_data(DATA_FILE)

        # 2) Explore.
        exploratory_data_analysis(df)

        # 3) Preprocess.
        X_train, X_test, y_train, y_test, scaler = data_preprocessing(df, TARGET_COLUMN)

        # 4) Train and compare models.
        results, best_model, best_model_name = train_and_evaluate_models(
            X_train, X_test, y_train, y_test
        )

        # 5) Deep-dive on the winner.
        detailed_evaluation(best_model, best_model_name, X_test, y_test, results)

        banner = "=" * 60
        print("\n" + banner)
        print("✅ 项目分析完成！")
        print(banner)
        print("\n生成的文件:")
        for note in (
            "  1. eda_distributions.png - 特征分布图",
            "  2. eda_correlation.png - 相关性热力图",
            "  3. model_comparison.png - 模型对比图",
            "  4. confusion_matrix.png - 混淆矩阵",
            "  5. feature_importance.png - 特征重要性（如适用）",
        ):
            print(note)

    except Exception as e:
        # Top-level boundary: report and show the traceback for debugging.
        print(f"\n❌ 错误: {e}")
        import traceback
        traceback.print_exc()


# ==================== Script entry point ====================
# Run the pipeline only when executed directly, not when imported.
if __name__ == "__main__":
    main()


