# -*- coding: utf-8 -*-
"""
测试集评估脚本（支持 LGBM + CatBoost 融合 + 阈值优化 + 特征工程 + 可视化）
"""

import pandas as pd
import numpy as np
import lightgbm as lgb
import catboost as cb
from sklearn.metrics import (
    roc_auc_score, accuracy_score, precision_score,
    recall_score, f1_score, classification_report,
    roc_curve, confusion_matrix
)
import warnings
import club
import matplotlib.pyplot as plt
import seaborn as sns
import log_util
import os  # provides os.makedirs for the plot function

warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


def find_best_threshold(y_true, y_proba, metric='f1'):
    """Grid-search the decision threshold that maximizes a binary metric.

    Scans thresholds from 0.10 to 0.50 (inclusive) in 0.01 steps and keeps
    the one with the highest score. Ties keep the earliest (lowest) threshold.

    Args:
        y_true: array-like of binary ground-truth labels (0/1).
        y_proba: array-like of predicted positive-class probabilities.
        metric: 'f1' (default) or 'accuracy'.

    Returns:
        Tuple ``(best_threshold, best_score)``.

    Raises:
        ValueError: if ``metric`` is not supported. (Previously an unknown
            metric silently fell through and returned the sentinel
            ``(0.5, -1)``, hiding the caller's mistake.)
    """
    scorers = {'accuracy': accuracy_score, 'f1': f1_score}
    if metric not in scorers:
        raise ValueError(f"Unsupported metric: {metric!r}; expected one of {sorted(scorers)}")
    scorer = scorers[metric]

    best_thresh, best_score = 0.5, -1
    for thresh in np.arange(0.1, 0.51, 0.01):
        y_pred = (y_proba >= thresh).astype(int)
        score = scorer(y_true, y_pred)
        if score > best_score:
            best_score, best_thresh = score, thresh
    return best_thresh, best_score


def plot_roc_curve_and_cm(y_true, y_proba, y_pred, save_dir='./models/'):
    """Plot the ROC curve and a normalized confusion matrix side by side.

    Saves a single two-panel figure to ``<save_dir>/evaluation_visualization.png``
    (directory is created if needed) and closes the figure afterwards.

    Args:
        y_true: array-like of binary ground-truth labels (0/1).
        y_proba: array-like of predicted positive-class probabilities.
        y_pred: array-like of hard class predictions (0/1).
        save_dir: output directory for the PNG file.
    """
    os.makedirs(save_dir, exist_ok=True)

    # --- ROC curve ---
    fpr, tpr, _ = roc_curve(y_true, y_proba)
    auc = roc_auc_score(y_true, y_proba)

    plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (AUC = {auc:.4f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc="lower right")
    plt.grid(alpha=0.3)

    # --- Confusion matrix ---
    cm = confusion_matrix(y_true, y_pred)
    # Row-normalize; guard against a zero row (a class absent from y_true),
    # which previously produced NaNs via a 0/0 division.
    row_sums = cm.sum(axis=1, keepdims=True)
    cm_norm = np.divide(cm.astype('float'), row_sums,
                        out=np.zeros(cm.shape, dtype=float),
                        where=row_sums != 0)

    plt.subplot(1, 2, 2)
    sns.heatmap(cm_norm, annot=True, fmt='.2%', cmap='Blues', cbar=False,
                xticklabels=['Not Attrition', 'Attrition'],
                yticklabels=['Not Attrition', 'Attrition'])
    plt.title('Confusion Matrix (Normalized)')
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')

    # Overlay the absolute counts inside each cell, below the percentage.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j + 0.5, i + 0.6, f'\n({cm[i, j]})',
                     ha="center", va="center", fontsize=9)

    plt.tight_layout()
    # os.path.join avoids the double slash produced by the old
    # f'{save_dir}/...' when save_dir already ends with '/'.
    plt.savefig(os.path.join(save_dir, 'evaluation_visualization.png'),
                dpi=300, bbox_inches='tight')
    plt.close()  # avoid showing the figure in non-interactive environments

def main():
    """Run the full test-set evaluation pipeline.

    Loads the held-out test split, applies the same feature engineering as
    training, scores an LGBM + CatBoost probability ensemble, chooses a
    classification threshold, logs all metrics, renders plots, and writes a
    plain-text metrics summary.
    """
    # Initialize the logger (log files are prefixed with "eval").
    logger = log_util.APICallLogger(root_path=".", log_prefix="eval")

    logger.info("测试模型开始运行")
    logger.info("加载测试集并评估模型...")

    # === 1. Load the test data ===
    # Assumes the label is the LAST column of test.csv — TODO confirm schema.
    df_test = pd.read_csv('./data/test.csv')
    y_test = df_test.iloc[:, -1].astype(int)
    X_test_raw = df_test.iloc[:, :-1]

    # === 2. Apply feature engineering to the test set ===
    # is_train=False: presumably skips fitting train-only statistics — see club.py.
    X_test_eng = club.engineer_features(X_test_raw, is_train=False)

    logger.info(f"特征工程后测试集形状: {X_test_eng.shape}, 正样本比例: {(y_test == 1).mean():.4f}")

    # === 3. Load the models' meta information ===
    meta = pd.read_pickle('./models/meta_info.pkl')
    feature_names = meta['feature_names']
    cat_cols = meta['categorical_features']

    # Fail fast if feature engineering did not reproduce all training columns.
    missing = set(feature_names) - set(X_test_eng.columns)
    if missing:
        error_msg = f"测试集缺少模型所需的特征: {missing}"
        logger.error(error_msg)
        raise ValueError(error_msg)

    # Select/reorder columns to match the training feature order exactly.
    X_test = X_test_eng[feature_names].copy()

    # Handle missing values and dtypes.
    for col in X_test.columns:
        if col in cat_cols:
            X_test[col] = X_test[col].fillna('Missing').astype('category')
        else:
            # NOTE(review): numeric NaNs are imputed with the TEST-set mean,
            # not a statistic saved from training — verify this is intended.
            X_test[col] = X_test[col].fillna(X_test[col].mean())

    # === 4. Load the models and predict ===
    lgb_model = lgb.Booster(model_file='./models/lgb_model.txt')
    cb_model = cb.CatBoostClassifier()
    cb_model.load_model('./models/cb_model.cbm')

    lgb_pred = lgb_model.predict(X_test, num_iteration=lgb_model.best_iteration)
    cb_pred = cb_model.predict_proba(X_test)[:, 1]

    # Weighted probability blend; 0.4/0.6 weights presumably tuned during
    # training — TODO confirm against the training script.
    ensemble_pred = 0.4 * lgb_pred + 0.6 * cb_pred

    # === 5. Determine the classification threshold ===
    # Prefer the F1-optimal threshold saved at training time; only fall back
    # to searching on the test set itself (which leaks test labels into the
    # threshold choice) when no saved threshold exists.
    if 'best_threshold_for_f1' in meta:
        best_thresh = meta['best_threshold_for_f1']
        y_pred = (ensemble_pred >= best_thresh).astype(int)
        logger.info(f"使用训练阶段保存的 F1 最优阈值: {best_thresh:.3f}")
    else:
        best_thresh, best_f1 = find_best_threshold(y_test, ensemble_pred, metric='f1')
        y_pred = (ensemble_pred >= best_thresh).astype(int)
        logger.info(f"在测试集上搜索到的 F1 最优阈值: {best_thresh:.3f} (F1={best_f1:.5f})")

    # === 6. Evaluation metrics ===
    # AUC is threshold-free (uses probabilities); the rest use hard labels.
    auc = roc_auc_score(y_test, ensemble_pred)
    acc = accuracy_score(y_test, y_pred)
    prec = precision_score(y_test, y_pred, zero_division=0)
    rec = recall_score(y_test, y_pred, zero_division=0)
    f1 = f1_score(y_test, y_pred, zero_division=0)

    logger.info("=" * 50)
    logger.info("融合模型测试结果")
    logger.info("=" * 50)
    logger.info(f"AUC         : {auc:.5f}")
    logger.info(f"Accuracy    : {acc:.5f}")
    logger.info(f"Precision   : {prec:.5f}")
    logger.info(f"Recall      : {rec:.5f}")
    logger.info(f"F1-score    : {f1:.5f}")
    logger.info(f"Threshold   : {best_thresh:.3f}")
    logger.info("=" * 50)

    # Log the classification report line by line.
    report = classification_report(y_test, y_pred, target_names=['Not Attrition', 'Attrition'])
    logger.info("分类报告:")
    for line in report.strip().split('\n'):
        if line.strip():
            logger.info(line)

    # === 7. Visualization ===
    # Plotting failures are non-fatal: metrics are already computed/logged.
    try:
        plot_roc_curve_and_cm(y_test, ensemble_pred, y_pred, save_dir='./plots')
        logger.info("可视化图表已保存至 ./plots/evaluation_visualization.png")
    except Exception as e:
        logger.warning(f"可视化失败（不影响预测）: {e}")

    # === 8. Save the evaluation-metric summary ===
    # NOTE(review): metrics_summary is only consumed by the commented-out
    # JSON dump below and is otherwise unused at the moment.
    metrics_summary = {
        "AUC": round(auc, 5),
        "Accuracy": round(acc, 5),
        "Precision": round(prec, 5),
        "Recall": round(rec, 5),
        "F1-score": round(f1, 5),
        "Threshold": round(best_thresh, 3)
    }

    # Save as a plain-text file.
    with open('./models/test_metrics.txt', 'w', encoding='utf-8') as f:
        f.write("融合模型测试结果\n")
        f.write("=" * 50 + "\n")
        f.write(f"AUC         : {auc:.5f}\n")
        f.write(f"Accuracy    : {acc:.5f}\n")
        f.write(f"Precision   : {prec:.5f}\n")
        f.write(f"Recall      : {rec:.5f}\n")
        f.write(f"F1-score    : {f1:.5f}\n")
        f.write(f"Threshold   : {best_thresh:.3f}\n")
        f.write("=" * 50 + "\n")

    logger.info("评估指标已保存至 ./models/test_metrics.txt")

    # (Optional) For JSON output, uncomment the code below:
    # import json
    # with open('./models/test_metrics.json', 'w', encoding='utf-8') as f:
    #     json.dump(metrics_summary, f, indent=4, ensure_ascii=False)
    # logger.info("评估指标已保存至 ./models/test_metrics.json")


# Script entry point: run the evaluation only when executed directly.
if __name__ == '__main__':
    main()