import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix, roc_curve
from sklearn.preprocessing import LabelEncoder, StandardScaler
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
import time
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
from sklearn.metrics import accuracy_score
import xgboost as xgb
import joblib
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.feature_selection import SelectFromModel
import warnings
from util.logUtil import Logger

# Use a CJK-capable font (e.g. "SimHei"/"Microsoft YaHei") so Chinese labels render.
plt.rcParams['font.sans-serif'] = ['SimHei']
# Keep the minus sign "-" rendering correctly once a non-default font is active.
plt.rcParams['axes.unicode_minus'] = False
warnings.filterwarnings('ignore')

# Initialize the logger shared by every step of this training script.
logger = Logger(
    root_path="../log/",  # root directory for log output
    log_name="train",                  # log file name
    level="info"                        # log level
).get_logger()

def analysis_data(data):
    """
    Exploratory analysis of the attrition dataset.

    Prints basic statistics, renders a 2x3 grid of plots relating key
    features to attrition, saves the figure under ../data/fig/, and
    reports the AUC of the raw Age feature against the target.
    """
    logger.info("===============数据分析==============")
    separator = "=" * 50
    print(separator)
    print("数据探索分析")
    print(separator)

    # Basic shape / target statistics.
    print(f"数据形状: {data.shape}")
    print(f"目标变量分布:\n{data['Attrition'].value_counts()}")
    print(f"离职率: {data['Attrition'].mean():.2%}")

    fig, axes = plt.subplots(2, 3, figsize=(18, 12))
    ax_pie, ax_age, ax_income = axes[0]
    ax_satisfaction, ax_overtime, ax_balance = axes[1]

    # 1. Attrition ratio as a pie chart.
    data['Attrition'].value_counts().plot.pie(autopct='%1.1f%%', ax=ax_pie)
    ax_pie.set_title('离职比例分布')

    # 2. Age density, split by attrition outcome.
    leavers = data[data['Attrition'] == 1]
    stayers = data[data['Attrition'] == 0]
    sns.kdeplot(data=leavers, x='Age', label='离职', ax=ax_age)
    sns.kdeplot(data=stayers, x='Age', label='未离职', ax=ax_age)
    ax_age.grid(True)
    ax_age.legend()
    ax_age.set_title('年龄分布 vs 离职')

    # 3. Monthly income vs attrition.
    sns.boxplot(x='Attrition', y='MonthlyIncome', data=data, ax=ax_income)
    ax_income.set_title('月收入 vs 离职')

    # 4-6. Mean attrition rate grouped by three ordinal features.
    bar_specs = [
        (ax_satisfaction, 'JobSatisfaction', '工作满意度 vs 离职率', '工作满意度'),
        (ax_overtime, 'OverTime', '加班 vs 离职率', '是否加班'),
        (ax_balance, 'WorkLifeBalance', '工作生活平衡 vs 离职率', '工作生活平衡评分'),
    ]
    for ax, column, title, xlabel in bar_specs:
        rates = data.groupby(column)['Attrition'].mean()
        ax.bar(rates.index, rates.values)
        ax.set_title(title)
        ax.set_xlabel(xlabel)
        ax.set_ylabel('离职率')

    plt.tight_layout()
    plt.savefig('../data/fig/员工离职影响因素分析图表汇总', dpi=300, bbox_inches='tight')
    plt.show()

    # How well the raw Age feature alone ranks leavers vs stayers.
    age_auc = roc_auc_score(data['Attrition'], data['Age'])
    print(f"年龄特征的AUC: {age_auc:.4f}")


def _numeric_view(series, mapping):
    """Return a numeric view of a categorical column without mutating it.

    Feature engineering runs before label encoding, so columns such as
    OverTime may still hold raw string codes; map those through *mapping*.
    Already-numeric columns are returned unchanged.
    """
    if series.dtype == object:
        return series.map(mapping)
    return series


def advanced_feature_engineering(data):
    """
    Enhanced feature engineering.

    Drops identifier/constant columns and derives business-logic interaction
    and boolean features. Returns a new DataFrame; *data* is not modified.

    Bug fixed: interaction features involving OverTime/BusinessTravel were
    previously computed directly on the raw columns. When those columns are
    still unencoded strings ('Yes'/'No', 'Travel_Frequently', ...), Python
    performs string repetition ('Yes' * 3) and the comparisons
    `OverTime == 1` / `BusinessTravel == 2` are always False, silently
    producing garbage features. Numeric views are now used instead; the
    original columns are left untouched for the later encoding step.
    """
    logger.info("==============特征处理==============")
    print("=" * 50)
    print("特征工程")
    print("=" * 50)

    data_processed = data.copy()

    # 1. Drop identifier / constant columns that carry no signal.
    cols_to_drop = ['EmployeeNumber', 'Over18', 'StandardHours']
    data_processed = data_processed.drop([col for col in cols_to_drop if col in data_processed.columns], axis=1)

    # Numeric views of categorical columns (no-ops when already encoded).
    # NOTE(review): the BusinessTravel mapping assumes 'Travel_Frequently'
    # is the "high travel" level (code 2) — confirm against the encoding
    # convention used elsewhere in the project.
    overtime = None
    if 'OverTime' in data_processed.columns:
        overtime = _numeric_view(data_processed['OverTime'], {'No': 0, 'Yes': 1})
    travel = None
    if 'BusinessTravel' in data_processed.columns:
        travel = _numeric_view(
            data_processed['BusinessTravel'],
            {'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2},
        )

    # 2. Business-logic interaction features.
    print("创建业务逻辑交互特征...")

    # Work pressure: overtime combined with job involvement.
    if overtime is not None and 'JobInvolvement' in data_processed.columns:
        data_processed['work_pressure'] = overtime * data_processed['JobInvolvement']

    # Income relative to satisfaction (+1 guards against division by zero).
    if all(col in data_processed.columns for col in ['MonthlyIncome', 'JobSatisfaction']):
        data_processed['income_satisfaction_ratio'] = data_processed['MonthlyIncome'] / (
                data_processed['JobSatisfaction'] + 1)

    # Career stagnation: years without promotion relative to job level.
    if all(col in data_processed.columns for col in ['YearsSinceLastPromotion', 'JobLevel']):
        data_processed['promotion_stagnation'] = data_processed['YearsSinceLastPromotion'] / (
                data_processed['JobLevel'] + 1)

    # Company loyalty: tenure relative to number of previous employers.
    if all(col in data_processed.columns for col in ['YearsAtCompany', 'NumCompaniesWorked']):
        data_processed['company_loyalty'] = data_processed['YearsAtCompany'] / (
                data_processed['NumCompaniesWorked'] + 1)

    # Work/life stress: both 1-4 satisfaction scores inverted via (6 - x).
    if all(col in data_processed.columns for col in ['WorkLifeBalance', 'EnvironmentSatisfaction']):
        data_processed['work_life_stress'] = (6 - data_processed['WorkLifeBalance']) * (
                6 - data_processed['EnvironmentSatisfaction'])

    # Commute stress: long commute combined with overtime.
    if overtime is not None and 'DistanceFromHome' in data_processed.columns:
        data_processed['commute_stress'] = data_processed['DistanceFromHome'] * overtime

    # 3. Boolean risk-flag features.
    if overtime is not None and travel is not None:
        data_processed['high_travel_high_overtime'] = (
                (travel == 2) & (overtime == 1)
        ).astype(int)

    if all(col in data_processed.columns for col in ['JobSatisfaction', 'JobInvolvement']):
        data_processed['low_satisfaction_high_involvement'] = (
                (data_processed['JobSatisfaction'] <= 2) & (data_processed['JobInvolvement'] >= 3)
        ).astype(int)

    if all(col in data_processed.columns for col in ['YearsSinceLastPromotion', 'JobLevel']):
        data_processed['stagnant_career'] = (
                (data_processed['YearsSinceLastPromotion'] > 2) & (data_processed['JobLevel'] <= 2)
        ).astype(int)

    print(f"特征工程后特征数量: {data_processed.shape[1]}")

    print(data_processed.describe())
    return data_processed


def encode_features(data):
    """
    Encode categorical features and the target variable.

    Splits *data* into features/target, label-encodes the target and every
    object-dtype feature column, and returns (x, y_encoded, target_encoder).

    Bug fixed: a single LabelEncoder instance was previously reused for the
    target AND every categorical column, then returned — by that point it
    had been re-fitted on the *last* feature column, so callers could not
    use it to inverse-transform predictions back to the original target
    labels. Each column now gets its own encoder, and the returned encoder
    is the one fitted on the target.
    """
    logger.info("===============特征编码=================")
    print("编码类别特征...")

    x = data.drop('Attrition', axis=1)
    y = data['Attrition']

    # Dedicated encoder for the target so it stays usable for inverse_transform.
    target_encoder = LabelEncoder()
    y_encoded = target_encoder.fit_transform(y)

    # Each object-dtype column gets a fresh encoder so fits don't clobber each other.
    categorical_columns = x.select_dtypes(include=['object']).columns
    for col in categorical_columns:
        x[col] = LabelEncoder().fit_transform(x[col].astype(str))

    print(f"编码后特征形状: {x.shape}")

    return x, y_encoded, target_encoder


def handle_imbalance(x, y, method='adasyn'):
    """
    Rebalance the classes by oversampling the minority class.

    Supported methods: 'adasyn', 'smote', 'oversample' (random duplication).
    Any other value falls back to ADASYN. Returns the resampled (x, y) and
    prints the class distribution before/after plus the elapsed time.
    """
    logger.info("===============处理类别不平衡===============")
    print(f"使用 {method.upper()} 处理类别不平衡...")

    # Dispatch table instead of an if/elif chain; unknown names default to ADASYN.
    sampler_classes = {
        'adasyn': ADASYN,
        'smote': SMOTE,
        'oversample': RandomOverSampler,
    }
    sampler_cls = sampler_classes.get(method.lower())
    if sampler_cls is None:
        print("使用默认的 ADASYN")
        sampler_cls = ADASYN
    sampler = sampler_cls(random_state=42)

    start_time = time.time()
    x_resampled, y_resampled = sampler.fit_resample(x, y)
    elapsed = time.time() - start_time

    print(f"采样前类别分布: {np.bincount(y)}")
    print(f"采样后类别分布: {np.bincount(y_resampled)}")
    print(f"采样耗时: {elapsed:.2f} 秒")

    return x_resampled, y_resampled


def feature_selection(x, y, method='importance', threshold='mean'):
    """
    Select features, by default via XGBoost feature importances.

    With method='importance', fits an XGBoost classifier and keeps the
    features whose importance passes *threshold* (default: the mean
    importance). Returns (selected feature matrix, selected feature labels).
    Any other method returns the input unchanged.
    """
    logger.info("================特征选择================")
    print("进行特征选择...")

    # Guard clause: anything other than 'importance' means no selection.
    if method != 'importance':
        return x, x.columns

    # Fit a ranking model, then keep only features above the threshold.
    ranker = xgb.XGBClassifier(random_state=42, n_estimators=100)
    ranker.fit(x, y)

    selection = SelectFromModel(ranker, threshold=threshold, prefit=True)
    x_selected = selection.transform(x)

    selected_features = x.columns[selection.get_support()]
    print(f"特征选择后保留 {len(selected_features)} 个特征")
    print(f"重要特征: {list(selected_features)}")

    return x_selected, selected_features


def advanced_model_training(x, y, selected_features=None):
    """
    Full training pipeline: split, tune, evaluate, and persist an XGBoost model.

    Performs a stratified train/test split, randomized hyperparameter search
    (optimizing ROC-AUC), decision-threshold tuning via Youden's J statistic,
    ROC-curve plotting, 5-fold cross-validation, feature-importance plotting,
    and saves the model plus feature metadata under ../model/.

    Parameters
    ----------
    x : array-like or DataFrame of features.
    y : array-like of binary labels.
    selected_features : optional sequence of feature names kept by a prior
        selection step; stored in the saved feature-info file. (Previously
        this was read from a module-level global, which raised NameError
        when the function was called standalone.)

    Returns (best_model, test_auc, best_params, selected_features).

    NOTE(review): when x/y were oversampled *before* calling this function,
    synthetic samples leak into the test split and the CV folds, which can
    inflate the reported AUC — consider resampling only the training split.
    """
    logger.info("===============开始训练==============")
    print("=" * 50)
    print("模型训练")
    print("=" * 50)

    # Stratified split keeps the class ratio identical across train and test.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=42, stratify=y
    )

    # Optional scaling (tree ensembles are scale-invariant, so off by default).
    use_scaling = False  # set True to experiment with standardization
    if use_scaling:
        scaler = StandardScaler()
        x_train = scaler.fit_transform(x_train)
        x_test = scaler.transform(x_test)
        print("已进行特征标准化")

    # Hyperparameter tuning.
    print("开始超参数调优...")

    param_dist = {
        'learning_rate': [0.01, 0.05, 0.1, 0.15, 0.2],
        'max_depth': [3, 4, 5, 6, 7, 8],
        'min_child_weight': [1, 3, 5, 7],
        'subsample': [0.6, 0.7, 0.8, 0.9, 1.0],
        'colsample_bytree': [0.6, 0.7, 0.8, 0.9, 1.0],
        'reg_alpha': [0, 0.1, 0.5, 1, 5],
        'reg_lambda': [0.1, 0.5, 1, 5, 10],
        'n_estimators': [100, 200, 300, 500],
        'scale_pos_weight': [1, 2, 3, 5]  # extra leverage on class imbalance
    }

    xgb_model = xgb.XGBClassifier(
        random_state=42,
        eval_metric='auc',
        use_label_encoder=False
    )

    # Randomized search: 50 candidates x 5-fold CV, scored by ROC-AUC.
    random_search = RandomizedSearchCV(
        xgb_model, param_dist, n_iter=50,
        scoring='roc_auc', cv=5, random_state=42,
        n_jobs=-1, verbose=1
    )

    random_search.fit(x_train, y_train)

    best_model = random_search.best_estimator_
    best_params = random_search.best_params_

    print(f"最佳参数: {best_params}")
    print(f"最佳交叉验证AUC: {random_search.best_score_:.4f}")

    # Held-out evaluation on the untouched test split.
    y_pred_proba = best_model.predict_proba(x_test)[:, 1]
    test_auc = roc_auc_score(y_test, y_pred_proba)

    # Choose the classification threshold that maximizes Youden's J (tpr - fpr).
    fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)
    optimal_idx = np.argmax(tpr - fpr)
    optimal_threshold = thresholds[optimal_idx]

    y_pred_optimal = (y_pred_proba >= optimal_threshold).astype(int)

    # Report evaluation metrics.
    print("\n" + "=" * 50)
    print("模型评估结果")
    print("=" * 50)
    print(f"测试集AUC: {test_auc:.4f}")
    print(f"最佳阈值: {optimal_threshold:.4f}")
    print(f"准确率: {accuracy_score(y_test, y_pred_optimal):.4f}")
    print("\n分类报告:")
    print(classification_report(y_test, y_pred_optimal))
    print("\n混淆矩阵:")
    print(confusion_matrix(y_test, y_pred_optimal))

    # ROC curve plot.
    plt.figure(figsize=(8, 6))
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC曲线 (AUC = {test_auc:.4f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='随机分类器')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假正率')
    plt.ylabel('真正率')
    plt.title('ROC曲线')
    plt.legend(loc="lower right")
    plt.grid(True)
    plt.savefig('../data/fig/ROC曲线', dpi=300, bbox_inches='tight')
    plt.show()

    # Cross-validation on the full data (see the leakage note in the docstring).
    print("\n进行交叉验证...")
    cv_scores = cross_val_score(
        best_model, x, y,
        cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),
        scoring='roc_auc'
    )
    print(f"交叉验证AUC: {cv_scores.mean():.4f} (+/- {cv_scores.std() * 2:.4f})")

    # Feature importances — use real column names when x is a DataFrame
    # (previously the plot always showed bare integer indices).
    if hasattr(best_model, 'feature_importances_'):
        feature_labels = list(x.columns) if hasattr(x, 'columns') else list(range(x.shape[1]))
        feature_importance = pd.DataFrame({
            'feature': feature_labels,
            'importance': best_model.feature_importances_
        }).sort_values('importance', ascending=False)

        plt.figure(figsize=(10, 8))
        sns.barplot(data=feature_importance.head(15), x='importance', y='feature')
        plt.title('前15个重要特征')
        plt.tight_layout()
        plt.savefig('../data/fig/前15个重要特征')
        plt.show()

    feature_info = {
        'selected_features': selected_features,
        'feature_names': x.columns.tolist() if hasattr(x, 'columns') else None,
        'num_features': x.shape[1]
    }

    # Persist the model and feature metadata, tagged with the test AUC.
    model_filename = f'../model/xgb_model_auc_{test_auc:.4f}.pkl'
    joblib.dump(best_model, model_filename)

    feature_info_filename = f'../model/xgb_model_auc_{test_auc:.4f}_features.pkl'
    joblib.dump(feature_info, feature_info_filename)

    print(f"模型已保存为: {model_filename}")
    print(f"特征信息已保存为: {feature_info_filename}")

    logger.info("===============训练结束===============")

    return best_model, test_auc, best_params, selected_features


def compare_sampling_methods(x, y):
    """
    Compare oversampling methods by held-out AUC of a fixed XGBoost model.

    Holds out a stratified 20% test split, resamples the training split with
    each candidate method, trains an identical baseline model on each, and
    returns (best_method, {method: auc}).

    Fixed: a dead `method == 'original'` branch could never execute because
    'original' was not in the candidate list; the unreachable code has been
    removed. (To add a no-resampling baseline, append 'original' to the list
    and reinstate a pass-through branch.)
    """
    print("=" * 50)
    print("比较不同采样方法")
    print("=" * 50)

    methods = ['adasyn', 'smote', 'oversample']
    results = {}

    # Resample only the training split so the test split stays untouched
    # and the comparison between methods is fair.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=42, stratify=y
    )

    for method in methods:
        print(f"\n测试方法: {method}")

        x_resampled, y_resampled = handle_imbalance(x_train, y_train, method)

        # Identical baseline model per method keeps the comparison honest.
        model = xgb.XGBClassifier(
            n_estimators=100,
            random_state=42,
            eval_metric='auc',
            use_label_encoder=False
        )

        model.fit(x_resampled, y_resampled)
        y_pred_proba = model.predict_proba(x_test)[:, 1]
        auc_score = roc_auc_score(y_test, y_pred_proba)

        results[method] = auc_score
        print(f"{method} AUC: {auc_score:.4f}")

    # Winner = highest test-set AUC.
    best_method = max(results, key=results.get)
    print(f"\n最佳采样方法: {best_method} (AUC: {results[best_method]:.4f})")

    return best_method, results


if __name__ == '__main__':
    # Load the raw training data
    data = pd.read_csv('../data/train.csv')

    # 1. Exploratory data analysis
    analysis_data(data)

    # 2. Feature engineering
    data_processed = advanced_feature_engineering(data)

    # 3. Feature encoding
    x, y, label_encoder = encode_features(data_processed)

    # 4. Compare resampling strategies and keep the one with the best AUC
    best_method, sampling_results = compare_sampling_methods(x, y)

    # 5. Resample the FULL dataset with the winning method
    # NOTE(review): resampling before the train/test split performed inside
    # advanced_model_training lets synthetic samples leak into its test set,
    # which can inflate the reported AUC — confirm this is intended.
    x_resampled, y_resampled = handle_imbalance(x, y, best_method)

    # 6. Feature selection (optional)
    use_feature_selection = True
    if use_feature_selection:
        x_final, selected_features = feature_selection(x_resampled, y_resampled)
    else:
        x_final, selected_features = x_resampled, x_resampled.columns

    # 7. Model training
    best_model, final_auc, best_params, selected_features = advanced_model_training(x_final, y_resampled)

    # 8. Final summary
    print("\n" + "=" * 60)
    print("最终结果总结")
    print("=" * 60)
    print(f"最佳采样方法: {best_method}")
    print(f"最终测试集AUC: {final_auc:.4f}")

    if final_auc >= 0.75:
        print("🎉 恭喜！模型AUC达到目标要求 (>= 0.75)")
    else:
        print("⚠️ 模型AUC未达到目标要求，建议进一步优化")

    print(f"最佳参数: {best_params}")
