# -*- coding:utf-8 -*-
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, precision_score, recall_score, roc_curve, auc, \
    confusion_matrix, precision_recall_curve
from sklearn.preprocessing import OrdinalEncoder
import matplotlib.pyplot as plt
import matplotlib as mpl
import catboost as cb
import xgboost as xgb
import pandas as pd
import numpy as np
import dataset as ds
import os
import time
import logging
import seaborn as sns
import joblib

# 配置中文字体
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# 创建目录
os.makedirs('./model', exist_ok=True)
os.makedirs('./result', exist_ok=True)
os.makedirs('./feature_selection', exist_ok=True)


# 将集成模型类定义为全局类，以便能够被pickle序列化
class EnsembleModel:
    """Soft-voting ensemble of two fitted binary classifiers.

    Defined at module level (not as a closure) so instances can be
    pickled/joblib-dumped. The optional ``weight1`` parameter generalizes
    the original fixed 50/50 probability average; the default reproduces
    the original behavior exactly.
    """

    def __init__(self, model1, model2, weight1=0.5):
        """
        Args:
            model1: first fitted classifier exposing ``predict_proba``.
            model2: second fitted classifier exposing ``predict_proba``.
            weight1: weight in [0, 1] applied to ``model1``'s probabilities;
                ``model2`` receives ``1 - weight1``. Default 0.5 is the
                plain average used originally.
        """
        self.model1 = model1
        self.model2 = model2
        self.weight1 = weight1

    def predict_proba(self, X):
        """Return the weighted average of both members' class probabilities."""
        proba1 = self.model1.predict_proba(X)
        proba2 = self.model2.predict_proba(X)
        return self.weight1 * proba1 + (1.0 - self.weight1) * proba2

    def predict(self, X):
        """Return hard 0/1 labels by thresholding the positive class at 0.5."""
        proba = self.predict_proba(X)
        return (proba[:, 1] > 0.5).astype(int)


def preprocess_categorical_features(x_train, x_test, categorical_features):
    """Ordinal-encode categorical columns, mapping unknown categories to -1.

    Encoders are fit on the training split only and then applied to the
    test split, so no information leaks from test to train. The input
    frames are not modified; encoded copies are returned.

    Args:
        x_train: training feature DataFrame.
        x_test: test feature DataFrame.
        categorical_features: names of columns to encode.

    Returns:
        (x_train_encoded, x_test_encoded, encoders) where ``encoders`` maps
        each feature name to its fitted OrdinalEncoder.
    """
    x_train_encoded = x_train.copy()
    x_test_encoded = x_test.copy()
    encoders = {}

    for feature in categorical_features:
        # Normalize to strings so mixed-type columns encode consistently.
        x_train_encoded[feature] = x_train[feature].astype(str)
        x_test_encoded[feature] = x_test[feature].astype(str)

        encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
        x_train_encoded[feature] = encoder.fit_transform(x_train_encoded[[feature]]).ravel()

        # handle_unknown='use_encoded_value' already maps unseen categories
        # to -1; this guard only fires on other transform-time ValueErrors.
        try:
            x_test_encoded[feature] = encoder.transform(x_test_encoded[[feature]]).ravel()
        except ValueError as e:
            logger.warning(f"处理特征 {feature} 时出错: {e}")
            x_test_encoded[feature] = -1

        encoders[feature] = encoder

    return x_train_encoded, x_test_encoded, encoders


def evaluate_model(model, x_test, y_test):
    """Score a fitted binary classifier on a held-out set and log the results.

    Args:
        model: fitted classifier exposing ``predict_proba`` (all models in
            this file, including EnsembleModel, provide it).
        x_test: feature matrix for evaluation.
        y_test: true binary labels (0/1).

    Returns:
        dict with keys 'auc', 'accuracy', 'f1', 'precision', 'recall',
        'confusion_matrix'.
    """
    start_time = time.time()
    # The original if/else both called predict_proba (the else branch's
    # "no predict_proba" comment was contradictory and unreachable); the
    # meaningful distinction is whether the model offers predict().
    y_pred = model.predict_proba(x_test)
    if hasattr(model, 'predict'):
        y_pred_class = model.predict(x_test)
    else:
        # Fall back to thresholding the positive-class probability at 0.5.
        y_pred_class = (y_pred[:, 1] > 0.5).astype(int)
    prediction_time = time.time() - start_time

    auc_score = roc_auc_score(y_test, y_pred[:, 1])
    acc = accuracy_score(y_test, y_pred_class)
    f1 = f1_score(y_test, y_pred_class)
    precision = precision_score(y_test, y_pred_class)
    recall = recall_score(y_test, y_pred_class)

    cm = confusion_matrix(y_test, y_pred_class)
    tn, fp, fn, tp = cm.ravel()

    logger.info(f"评估结果:")
    logger.info(f"  AUC: {auc_score:.6f}")
    logger.info(f"  准确率: {acc:.4f}")
    logger.info(f"  F1分数: {f1:.4f}")
    logger.info(f"  精确率: {precision:.4f}")
    logger.info(f"  召回率: {recall:.4f}")
    logger.info(f"  TP: {tp}, FP: {fp}, FN: {fn}, TN: {tn}")
    logger.info(f"  预测耗时: {prediction_time:.4f}秒")

    return {
        'auc': auc_score,
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall,
        'confusion_matrix': cm
    }


def save_feature_importance(model, feature_names, prefix=""):
    """Extract, plot, and persist a model's feature importances.

    Writes a timestamped bar chart (.png) and a sorted table (.csv) under
    ./result/. Returns the importance DataFrame, or None when the model
    exposes no importances or anything goes wrong.
    """
    try:
        # CatBoost exposes a method; sklearn/XGBoost expose an attribute.
        if hasattr(model, 'get_feature_importance'):
            feature_importance = model.get_feature_importance()
        elif hasattr(model, 'feature_importances_'):
            feature_importance = model.feature_importances_
        else:
            logger.warning(f"模型 {type(model)} 没有特征重要性方法")
            return None

        # Normalize to a numpy array (no copy when it already is one).
        feature_importance = np.asarray(feature_importance)

        if len(feature_importance) == 0:
            logger.warning("特征重要性数组为空")
            return None

        # Guard against a name/importance length mismatch by truncating names.
        if len(feature_importance) != len(feature_names):
            logger.error(f"特征重要性数量({len(feature_importance)})与特征名称数量({len(feature_names)})不匹配")
            feature_names = feature_names[:len(feature_importance)]
            logger.warning(f"已截断特征名称列表为: {len(feature_names)}")

        # Sort features by descending importance.
        order = np.argsort(feature_importance)[::-1]
        sorted_names = np.array(feature_names)[order]
        sorted_values = feature_importance[order]

        plt.figure(figsize=(12, 8))
        plt.title(f"{prefix}特征重要性")
        positions = range(len(sorted_values))
        plt.bar(positions, sorted_values, align='center')
        plt.xticks(positions, sorted_names, rotation=90)
        plt.tight_layout()

        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        plt.savefig(f'./result/{prefix}feature_importance_{timestamp}.png')
        plt.close()

        importance_df = pd.DataFrame({
            'feature': sorted_names,
            'importance': sorted_values
        })
        importance_df.to_csv(f'./result/{prefix}feature_importance_{timestamp}.csv', index=False)
        return importance_df

    except Exception as e:
        logger.error(f"保存特征重要性时出错: {e}")
        return None


def plot_roc_curve(y_true, y_proba, timestamp):
    """Plot the ROC curve, save it under ./result/, and return its AUC."""
    false_pos_rate, true_pos_rate, _ = roc_curve(y_true, y_proba)
    roc_auc = auc(false_pos_rate, true_pos_rate)

    plt.figure(figsize=(10, 8))
    # Model curve plus the chance diagonal for reference.
    plt.plot(false_pos_rate, true_pos_rate, color='darkorange', lw=2,
             label=f'ROC曲线 (AUC = {roc_auc:.4f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假正率')
    plt.ylabel('真正率')
    plt.title('接收者操作特征曲线')
    plt.legend(loc="lower right")
    plt.grid(True)

    plt.savefig(f'./result/roc_curve_{timestamp}.png')
    plt.close()
    return roc_auc


def plot_precision_recall_curve(y_true, y_proba, timestamp):
    """Plot the precision-recall curve and save it under ./result/.

    Args:
        y_true: true binary labels.
        y_proba: predicted positive-class probabilities.
        timestamp: string used to name the output file.

    Returns:
        The average precision (AP) of the curve.
    """
    precision, recall, _ = precision_recall_curve(y_true, y_proba)
    # Average precision is the step-wise area under the PR curve:
    # AP = sum_n (R_n - R_{n-1}) * P_n  (recall is returned in decreasing
    # order, hence the leading minus sign). The original np.mean(precision)
    # was not a valid AP estimate — it ignores recall spacing entirely.
    average_precision = -np.sum(np.diff(recall) * np.asarray(precision)[:-1])

    plt.figure(figsize=(10, 8))
    plt.plot(recall, precision, color='darkgreen', lw=2, label=f'精确率-召回率曲线 (AP = {average_precision:.4f})')
    plt.xlabel('召回率')
    plt.ylabel('精确率')
    plt.title('精确率-召回率曲线')
    plt.legend(loc="upper right")
    plt.grid(True)

    plt.savefig(f'./result/precision_recall_curve_{timestamp}.png')
    plt.close()
    return average_precision


def plot_confusion_matrix(cm, timestamp):
    """Render the 2x2 confusion matrix as a heatmap and save it under ./result/."""
    class_labels = ['负类', '正类']
    plt.figure(figsize=(8, 6))
    # Annotate each cell with its raw count.
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_labels,
                yticklabels=class_labels)
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')
    plt.title('混淆矩阵')
    plt.savefig(f'./result/confusion_matrix_{timestamp}.png')
    plt.close()


def cross_validate_model(x, y, categorical_features, n_splits=5):
    """Stratified k-fold cross-validation with a CatBoost classifier.

    Each fold fits its own categorical encoders (no leakage from the
    validation split) and its own class weight. Per-fold and averaged
    AUC/F1 are logged.

    Returns:
        (mean AUC, mean F1) across the folds.
    """
    logger.info(f"开始 {n_splits} 折交叉验证...")
    splitter = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
    auc_scores = []
    f1_scores = []

    for fold, (train_idx, val_idx) in enumerate(splitter.split(x, y)):
        logger.info(f"训练折叠 {fold + 1}/{n_splits}")
        x_train, x_val = x.iloc[train_idx], x.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]

        # Encode categorical columns using this fold's training data only.
        x_train, x_val, _ = preprocess_categorical_features(x_train, x_val, categorical_features)

        # Weight the positive class by the fold's negative/positive ratio.
        scale_pos_weight = len(y_train[y_train == 0]) / len(y_train[y_train == 1])

        model = cb.CatBoostClassifier(
            iterations=2000,
            learning_rate=0.05,
            depth=6,
            l2_leaf_reg=5,
            loss_function='Logloss',
            eval_metric='AUC',
            random_seed=42,
            scale_pos_weight=scale_pos_weight,
            verbose=100
        )
        model.fit(x_train, y_train, eval_set=(x_val, y_val), use_best_model=True)

        auc_score = roc_auc_score(y_val, model.predict_proba(x_val)[:, 1])
        f1 = f1_score(y_val, model.predict(x_val))
        auc_scores.append(auc_score)
        f1_scores.append(f1)
        logger.info(f"折叠 {fold + 1} 结果 - AUC: {auc_score:.4f}, F1: {f1:.4f}")

    avg_auc = np.mean(auc_scores)
    avg_f1 = np.mean(f1_scores)
    logger.info(f"交叉验证完成 - 平均AUC: {avg_auc:.4f}, 平均F1: {avg_f1:.4f}")
    return avg_auc, avg_f1


def train_ensemble_model(x_train, y_train, x_val, y_val, scale_pos_weight, feature_names, categorical_features):
    """Train a CatBoost + XGBoost soft-voting ensemble.

    Categorical columns are ordinal-encoded (encoders fit on the training
    split only), both base models use the validation split for early
    stopping, and their feature importances are saved under ./result/.

    Args:
        x_train, y_train: training features and labels.
        x_val, y_val: validation features and labels (early stopping).
        scale_pos_weight: positive-class weight for class imbalance.
        feature_names: feature names used for the importance plots.
        categorical_features: names of categorical columns to encode.

    Returns:
        (ensemble_model, encoders): the fitted EnsembleModel and the dict
        of per-feature OrdinalEncoders needed to transform new data.
    """
    # Fit encoders on the training split and apply them to both splits.
    x_train_encoded, x_val_encoded, encoders = preprocess_categorical_features(x_train, x_val, categorical_features)

    logger.info("训练CatBoost模型...")
    cat_model = cb.CatBoostClassifier(
        iterations=3000,
        learning_rate=0.05,
        depth=6,
        l2_leaf_reg=5,
        loss_function='Logloss',
        eval_metric='AUC',
        random_seed=42,
        scale_pos_weight=scale_pos_weight,
        verbose=100
    )

    cat_model.fit(
        x_train_encoded, y_train,
        eval_set=(x_val_encoded, y_val),
        use_best_model=True
    )

    logger.info("训练XGBoost模型...")
    xgb_model = xgb.XGBClassifier(
        n_estimators=1000,
        learning_rate=0.05,
        max_depth=6,
        subsample=0.8,
        colsample_bytree=0.8,
        objective='binary:logistic',
        eval_metric='auc',
        random_state=42,
        scale_pos_weight=scale_pos_weight,
        n_jobs=-1,
        # Early stopping belongs in the constructor: passing it to fit()
        # was deprecated in XGBoost 1.6 and removed in 2.0.
        early_stopping_rounds=50
    )

    xgb_model.fit(
        x_train_encoded, y_train,
        eval_set=[(x_val_encoded, y_val)],
        verbose=100
    )

    # Persist per-model feature importances for later inspection.
    save_feature_importance(cat_model, feature_names, prefix="catboost_")
    save_feature_importance(xgb_model, feature_names, prefix="xgboost_")

    # Combine the two fitted models into a picklable probability-averaging ensemble.
    ensemble_model = EnsembleModel(cat_model, xgb_model)
    return ensemble_model, encoders


def train():
    """End-to-end training pipeline.

    Loads the dataset, runs 3-fold cross-validation, trains a
    CatBoost+XGBoost ensemble on a 70/30 split, selects the top-20
    features by CatBoost importance, retrains on those features, and
    saves the final model, feature list and encoders under ./model/.

    Returns:
        (model, metrics) on success, (None, None) on any failure
        (errors are logged with traceback rather than raised).
    """
    try:
        logger.info("加载训练数据...")
        x, y, feature_names = ds.load_data()
        logger.info(f"数据集形状: x={x.shape}, y={y.shape}")
        logger.info(f"特征数量: {len(feature_names)}")

        # Identify categorical features by dtype (object or pandas category).
        categorical_features = [col for col in x.columns if x[col].dtype == 'object' or x[col].dtype.name == 'category']
        logger.info(f"分类特征: {categorical_features}")

        # Analyze class distribution; assumes binary labels 0 (negative) / 1 (positive).
        class_counts = y.value_counts()
        imbalance_ratio = class_counts[0] / class_counts[1]
        logger.info(f"类别分布: 负类={class_counts[0]}, 正类={class_counts[1]}, 不平衡比例: {imbalance_ratio:.2f}:1")

        # Run cross-validation; results are logged, the returns are not reused below.
        avg_auc, avg_f1 = cross_validate_model(x, y, categorical_features, n_splits=3)

        logger.info("划分训练集和测试集...")
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, test_size=0.3, random_state=42, stratify=y
        )
        logger.info(f"训练集: {x_train.shape}, 测试集: {x_test.shape}")
        logger.info(f"训练集类别分布: 负类={sum(y_train == 0)}, 正类={sum(y_train == 1)}")
        logger.info(f"测试集类别分布: 负类={sum(y_test == 0)}, 正类={sum(y_test == 1)}")

        # Positive-class weight = negatives / positives, to counter imbalance.
        scale_pos_weight = len(y_train[y_train == 0]) / len(y_train[y_train == 1])
        logger.info(f"类别权重(scale_pos_weight): {scale_pos_weight:.2f}")

        # Train the ensemble. NOTE: the test split doubles as the early-stopping
        # validation set here, so the test metrics below are somewhat optimistic.
        ensemble_model, encoders = train_ensemble_model(
            x_train, y_train, x_test, y_test, scale_pos_weight, feature_names, categorical_features
        )
        logger.info("集成模型训练完成")

        # Re-encode the test split's categorical features with the fitted encoders.
        x_test_encoded = x_test.copy()
        for feature in categorical_features:
            if feature in encoders:
                # Cast to str to match how the encoder was fitted.
                x_test_encoded[feature] = x_test[feature].astype(str)
                # Transform the test column (unknown categories map to -1).
                x_test_encoded[feature] = encoders[feature].transform(x_test_encoded[[feature]]).flatten()

        # Evaluate the ensemble on the (encoded) test split.
        metrics = evaluate_model(ensemble_model, x_test_encoded, y_test)
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())

        # Save ROC, precision-recall and confusion-matrix plots.
        y_pred_proba = ensemble_model.predict_proba(x_test_encoded)[:, 1]
        plot_roc_curve(y_test, y_pred_proba, timestamp)
        plot_precision_recall_curve(y_test, y_pred_proba, timestamp)
        plot_confusion_matrix(metrics['confusion_matrix'], timestamp)

        # Feature selection.
        logger.info("进行特征选择...")
        # Use the CatBoost member's feature importances.
        cat_model = ensemble_model.model1  # model1 is the CatBoost member of the ensemble
        importance = cat_model.get_feature_importance()
        feature_importance = pd.DataFrame({'feature': feature_names, 'importance': importance})
        top_features = feature_importance.nlargest(20, 'importance')['feature'].tolist()
        logger.info(f"Top 20重要特征: {', '.join(top_features)}")

        # Persist the selected feature list for inference-time use.
        feature_selector = {'top_features': top_features}
        joblib.dump(feature_selector, './feature_selection/feature_selector.pkl')

        # Retrain using only the top-20 features.
        logger.info("使用Top20特征重新训练模型...")
        x_train_top = x_train[top_features]
        x_test_top = x_test[top_features]

        # Categorical features that survived the selection.
        top_categorical_features = [col for col in top_features if col in categorical_features]

        top_ensemble_model, top_encoders = train_ensemble_model(
            x_train_top, y_train, x_test_top, y_test, scale_pos_weight, top_features, top_categorical_features
        )

        # Re-encode the reduced test split with the new encoders.
        x_test_top_encoded = x_test_top.copy()
        for feature in top_categorical_features:
            if feature in top_encoders:
                # Cast to str to match how the encoder was fitted.
                x_test_top_encoded[feature] = x_test_top[feature].astype(str)
                # Transform the test column.
                x_test_top_encoded[feature] = top_encoders[feature].transform(x_test_top_encoded[[feature]]).flatten()

        # Evaluate the retrained model and log the AUC delta vs. the full model.
        top_metrics = evaluate_model(top_ensemble_model, x_test_top_encoded, y_test)
        logger.info(f"特征选择后AUC提升: {top_metrics['auc'] - metrics['auc']:.6f}")

        # Persist the final model.
        logger.info("保存模型文件...")
        model_name = f'ensemble_model_auc_{top_metrics["auc"]:.4f}_f1_{top_metrics["f1"]:.4f}_{timestamp}'
        model_path = f'./model/{model_name}.joblib'
        # Dump the whole ensemble object (EnsembleModel is module-level, so picklable).
        joblib.dump(top_ensemble_model, model_path)

        # Save the selected feature names alongside the model.
        with open(f'./model/{model_name}_features.txt', 'w') as f:
            f.write("\n".join(top_features))

        # Save the fitted categorical encoders.
        joblib.dump(top_encoders, f'./model/{model_name}_label_encoders.joblib')

        logger.info(f"模型已保存至: {model_path}")

        return top_ensemble_model, top_metrics

    except Exception as e:
        logger.error(f"训练过程中发生错误: {e}", exc_info=True)
        return None, None


if __name__ == '__main__':
    model, metrics = train()
    # train() returns (None, None) on failure.
    if not (model and metrics):
        logger.error("训练失败")
    else:
        logger.info("训练成功完成!")
        logger.info(f"最终AUC分数: {metrics['auc']:.6f}")
        logger.info(f"最终F1分数: {metrics['f1']:.6f}")
