import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score, confusion_matrix
from imblearn.ensemble import EasyEnsembleClassifier, BalancedBaggingClassifier
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import joblib

# Configure the plotting/runtime environment for CJK output.
plt.rcParams.update({
    "font.family": ["SimHei"],      # font with CJK glyph coverage
    "axes.unicode_minus": False,    # keep minus signs renderable under CJK fonts
})
warnings.filterwarnings('ignore')  # suppress library warnings for cleaner console output


# 创建保存图表和模型的目录
def create_directories():
    """Ensure the output directories for saved models and plots exist.

    Creates ``models/`` and ``plots/`` relative to the current working
    directory; directories that already exist are left untouched.
    """
    for dir_name in ('models', 'plots'):
        # exist_ok=True avoids the racy exists()-then-makedirs pattern.
        os.makedirs(dir_name, exist_ok=True)


# 1. 数据准备与合并
def load_and_merge_data():
    """Load feature and label CSVs from the working directory and merge them.

    Expects ``final_features_with_pca.csv`` (keyed by user_id/seller_id) and
    ``train.csv`` (user_id/merchant_id/label). The label table drives the merge
    (left join), so every labeled pair is kept even if its features are missing.

    Returns:
        pd.DataFrame | None: merged frame with features plus ``label``, or
        None if loading or merging failed.
    """
    try:
        # Fail early with a clear message when an input file is absent.
        if not os.path.exists('final_features_with_pca.csv'):
            raise FileNotFoundError("特征数据文件 'final_features_with_pca.csv' 不存在")
        if not os.path.exists('train.csv'):
            raise FileNotFoundError("训练数据文件 'train.csv' 不存在")

        features = pd.read_csv('final_features_with_pca.csv')
        train_data = pd.read_csv('train.csv')

        # Align the merchant-ID column name with the feature table's key.
        train_data = train_data.rename(columns={'merchant_id': 'seller_id'})

        # Left join: keep every labeled (user, seller) pair.
        merged_data = pd.merge(train_data, features, on=['user_id', 'seller_id'], how='left')

        print(f"合并后数据形状: {merged_data.shape}")
        print(f"标签分布:\n{merged_data['label'].value_counts(normalize=True)}")

        # A left merge silently yields NaN features for pairs absent from the
        # feature table — surface that so it isn't discovered at fit() time.
        missing_rows = int(merged_data.isna().any(axis=1).sum())
        if missing_rows:
            print(f"Warning: {missing_rows} merged rows contain missing feature values")

        return merged_data
    except Exception as e:
        # Best-effort loader: report and let the caller abort via the None return.
        print(f"数据加载和合并出错: {str(e)}")
        return None


# 2. 数据处理
def prepare_data(merged_data):
    """Split the merged frame into features/labels and train/test partitions.

    Args:
        merged_data: frame from load_and_merge_data, or None on load failure.

    Returns:
        tuple: (X, y, X_train, X_test, y_train, y_test); six Nones on failure.
    """
    failure = (None, None, None, None, None, None)

    if merged_data is None:
        return failure

    try:
        # Labels stay as a Series; everything except ids/label is a feature.
        y = merged_data['label']
        X = merged_data.drop(columns=['user_id', 'seller_id', 'label'])

        # Stratified 80/20 split keeps the class ratio in both partitions.
        split = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
        X_train, X_test, y_train, y_test = split

        print(f"训练集形状: {X_train.shape}, 测试集形状: {X_test.shape}")
        print(f"训练集标签分布:\n{y_train.value_counts(normalize=True)}")
        print(f"测试集标签分布:\n{y_test.value_counts(normalize=True)}")

        return X, y, X_train, X_test, y_train, y_test
    except Exception as e:
        print(f"数据处理出错: {str(e)}")
        return failure


# 3. 模型训练与评估函数
def train_and_evaluate(model, X_train, y_train, X_test, y_test, model_name):
    """训练模型并评估性能"""
    try:
        print(f"\n===== 训练 {model_name} 模型 =====")

        # 训练模型
        model.fit(X_train, y_train)

        # 预测
        y_pred = model.predict(X_test)
        y_prob = model.predict_proba(X_test)[:, 1]  # 正类的概率

        # 评估指标
        print("\n分类报告:")
        print(classification_report(y_test, y_pred))

        # AUC分数
        auc = roc_auc_score(y_test, y_prob)
        print(f"AUC分数: {auc:.4f}")

        # 混淆矩阵
        cm = confusion_matrix(y_test, y_pred)
        plt.figure(figsize=(8, 6))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=['非重购', '重购'],
                    yticklabels=['非重购', '重购'])
        plt.xlabel('预测')
        plt.ylabel('实际')
        plt.title(f'{model_name} 混淆矩阵')

        # 保存图表前设置正确的编码
        plt.savefig(f'plots/{model_name}_confusion_matrix.png', dpi=300, bbox_inches='tight')
        plt.close()

        # 特征重要性（如果可用）
        if hasattr(model, 'feature_importances_'):
            feature_importances = pd.DataFrame({
                'feature': X_train.columns,
                'importance': model.feature_importances_
            }).sort_values('importance', ascending=False)

            plt.figure(figsize=(12, 8))
            sns.barplot(x='importance', y='feature', data=feature_importances.head(15))
            plt.title(f'{model_name} - 前15个重要特征')
            plt.tight_layout()

            # 保存图表前设置正确的编码
            plt.savefig(f'plots/{model_name}_feature_importances.png', dpi=300, bbox_inches='tight')
            plt.close()

        return model, auc
    except Exception as e:
        print(f"{model_name} 模型训练和评估出错: {str(e)}")
        return None, 0


def main():
    """End-to-end pipeline: load data, train three imbalance-aware
    classifiers, compare them by test-set AUC, then retrain and persist
    the best one on the full dataset."""
    create_directories()

    merged_data = load_and_merge_data()
    X, y, X_train, X_test, y_train, y_test = prepare_data(merged_data)

    if X is None:
        print("数据准备失败，程序退出")
        return

    # Collected as (fitted model, auc, display name) tuples.
    models = []

    # 4.1 EasyEnsemble: ensembles trained on balanced bootstrap subsamples.
    easy_ensemble = EasyEnsembleClassifier(
        n_estimators=50,
        random_state=42,
        n_jobs=-1,  # use all available CPU cores
    )
    ee_model, ee_auc = train_and_evaluate(easy_ensemble, X_train, y_train, X_test, y_test, "EasyEnsemble")
    if ee_model is not None:
        models.append((ee_model, ee_auc, "EasyEnsemble"))

    # 4.2 Balanced bagging with XGBoost base learners.
    balanced_bagging = BalancedBaggingClassifier(
        estimator=XGBClassifier(random_state=42, eval_metric='logloss'),
        n_estimators=30,
        sampling_strategy='auto',
        replacement=False,
        random_state=42,
        n_jobs=-1,
    )
    bb_model, bb_auc = train_and_evaluate(balanced_bagging, X_train, y_train, X_test, y_test, "平衡Bagging")
    if bb_model is not None:
        models.append((bb_model, bb_auc, "平衡Bagging"))

    # 4.3 Plain XGBoost, compensating imbalance via class weighting:
    # scale_pos_weight = (#negatives / #positives), guarded against /0.
    pos_count = y_train.sum()
    neg_count = len(y_train) - pos_count
    scale_pos_weight = neg_count / pos_count if pos_count > 0 else 1.0

    print(f"\n正样本数: {pos_count}, 负样本数: {neg_count}, scale_pos_weight: {scale_pos_weight:.2f}")

    # Keep the unfitted estimator in its own variable instead of reusing one
    # name for both the estimator and the fitted result.
    xgb_clf = XGBClassifier(
        scale_pos_weight=scale_pos_weight,
        random_state=42,
        eval_metric='logloss',
        n_estimators=200,
        learning_rate=0.1,
        max_depth=5,
        subsample=0.8,
        colsample_bytree=0.8,
        n_jobs=-1,
    )
    xgb_model, xgb_auc = train_and_evaluate(xgb_clf, X_train, y_train, X_test, y_test, "XGBoost")
    if xgb_model is not None:
        models.append((xgb_model, xgb_auc, "XGBoost"))

    if not models:
        print("所有模型训练失败，程序退出")
        return

    # 5. Model comparison
    print("\n===== 模型性能比较 =====")
    for model, auc, name in models:
        print(f"{name} AUC: {auc:.4f}")

    # Bar chart of AUCs for a quick visual comparison.
    plt.figure(figsize=(10, 6))
    model_names = [name for _, _, name in models]
    auc_scores = [auc for _, auc, _ in models]
    sns.barplot(x=model_names, y=auc_scores)
    plt.title('模型AUC比较')
    plt.ylabel('AUC分数')
    plt.ylim(0.5, 1.0)
    for i, v in enumerate(auc_scores):
        plt.text(i, v + 0.01, f"{v:.4f}", ha='center')
    plt.savefig('plots/model_comparison.png', dpi=300, bbox_inches='tight')
    plt.close()

    # 6. Select the best model once (single max() call) and unpack it.
    best_model, _, best_model_name = max(models, key=lambda entry: entry[1])

    print(f"\n选择{best_model_name}作为最佳模型")

    # Refit the winner on the full dataset before persisting it.
    print("\n使用全部数据训练最佳模型...")
    try:
        best_model.fit(X, y)

        joblib.dump(best_model, 'models/best_imbalanced_model.pkl')
        print("模型已保存为 models/best_imbalanced_model.pkl")

    except Exception as e:
        print(f"训练最终模型或生成预测结果时出错: {str(e)}")


# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()