import os
import pandas as pd
import joblib
import numpy as np
from sklearn.preprocessing import LabelEncoder
from imblearn.over_sampling import ADASYN
import time
from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix
from util.logUtil import Logger

# Initialize the logger (project-local wrapper from util.logUtil; writes
# a log file named "test" under ../log/ at INFO level).
logger = Logger(
    root_path="../log/",  # project root path for log output
    log_name="test",                  # log file name
    level="info"                        # log level
).get_logger()

def advanced_feature_engineering(data):
    """
    Apply the exact feature engineering used by the training pipeline.

    Drops identifier/constant columns, then appends interaction features
    and 0/1 boolean flags derived from the raw HR columns. Every derivation
    is guarded by a column-presence check, so missing columns are skipped
    silently rather than raising.

    Args:
        data: input DataFrame; it is not modified (a copy is processed).

    Returns:
        New DataFrame with the engineered columns appended.
    """
    logger.info("=============特征处理=============")
    df = data.copy()

    # 1. Remove columns with no predictive value.
    drop_candidates = ['EmployeeNumber', 'Over18', 'StandardHours']
    df = df.drop([c for c in drop_candidates if c in df.columns], axis=1)

    # 2./3. Derived features, table-driven: each entry is
    # (new column, required columns, formula). The list order fixes the
    # column order of the output frame, matching the training pipeline.
    # NOTE(review): the arithmetic assumes columns such as OverTime and
    # BusinessTravel are already numeric codes — on a raw CSV they are
    # usually strings ('Yes'/'No'); confirm this matches the training input.
    derived = [
        # work pressure
        ('work_pressure', ('OverTime', 'JobInvolvement'),
         lambda d: d['OverTime'] * d['JobInvolvement']),
        # income-to-satisfaction ratio (+1 avoids division by zero)
        ('income_satisfaction_ratio', ('MonthlyIncome', 'JobSatisfaction'),
         lambda d: d['MonthlyIncome'] / (d['JobSatisfaction'] + 1)),
        # career-stagnation indicator
        ('promotion_stagnation', ('YearsSinceLastPromotion', 'JobLevel'),
         lambda d: d['YearsSinceLastPromotion'] / (d['JobLevel'] + 1)),
        # company loyalty
        ('company_loyalty', ('YearsAtCompany', 'NumCompaniesWorked'),
         lambda d: d['YearsAtCompany'] / (d['NumCompaniesWorked'] + 1)),
        # work/life stress (6 - x inverts the satisfaction scales)
        ('work_life_stress', ('WorkLifeBalance', 'EnvironmentSatisfaction'),
         lambda d: (6 - d['WorkLifeBalance']) * (6 - d['EnvironmentSatisfaction'])),
        # commute stress
        ('commute_stress', ('DistanceFromHome', 'OverTime'),
         lambda d: d['DistanceFromHome'] * d['OverTime']),
        # boolean flags, stored as 0/1 ints
        ('high_travel_high_overtime', ('BusinessTravel', 'OverTime'),
         lambda d: ((d['BusinessTravel'] == 2) & (d['OverTime'] == 1)).astype(int)),
        ('low_satisfaction_high_involvement', ('JobSatisfaction', 'JobInvolvement'),
         lambda d: ((d['JobSatisfaction'] <= 2) & (d['JobInvolvement'] >= 3)).astype(int)),
        ('stagnant_career', ('YearsSinceLastPromotion', 'JobLevel'),
         lambda d: ((d['YearsSinceLastPromotion'] > 2) & (d['JobLevel'] <= 2)).astype(int)),
    ]
    for new_col, required, formula in derived:
        if all(c in df.columns for c in required):
            df[new_col] = formula(df)

    print(f"特征工程后特征数量: {df.shape[1]}")

    return df


def encode_features(data):
    """
    Split features/target and label-encode, matching the training code.

    Args:
        data: DataFrame containing an 'Attrition' target column.

    Returns:
        Tuple (x, y_encoded, target_encoder) where x has all object columns
        integer-encoded, y_encoded is the encoded target array, and
        target_encoder is the LabelEncoder fitted on the TARGET labels
        (usable later to invert predictions).
    """
    logger.info("===============特征编码==============")
    x = data.drop('Attrition', axis=1)
    y = data['Attrition']

    # BUG FIX: previously one shared LabelEncoder was re-fit on every
    # categorical column inside the loop, so the encoder returned to the
    # caller described the LAST column, not the target labels. Use a
    # dedicated encoder for the target and a fresh one per column.
    target_encoder = LabelEncoder()
    y_encoded = target_encoder.fit_transform(y)

    # Integer-encode each categorical (object-dtype) feature column.
    # NOTE(review): fitting encoders on the test data means the integer
    # codes may differ from training-time codes — confirm upstream.
    categorical_columns = x.select_dtypes(include=['object']).columns
    for col in categorical_columns:
        x[col] = LabelEncoder().fit_transform(x[col].astype(str))

    print(f"编码后特征形状: {x.shape}")

    return x, y_encoded, target_encoder


def handle_imbalance(x, y, method='adasyn'):
    """
    Balance the class distribution of (x, y) by oversampling.

    Only ADASYN is implemented; any other *method* value falls back to
    ADASYN after printing a notice. random_state=42 keeps the resampling
    reproducible.

    Args:
        x: feature matrix (DataFrame or array).
        y: encoded class labels (non-negative ints, for np.bincount).
        method: sampler name; anything other than 'adasyn' uses the default.

    Returns:
        Tuple (x_resampled, y_resampled).
    """
    logger.info("==============处理类别不平衡==============")
    print(f"使用 {method.upper()} 处理类别不平衡...")

    # Both branches previously constructed an identical ADASYN instance;
    # only the fallback notice differs, so build the sampler once.
    if method.lower() != 'adasyn':
        print("使用默认的 ADASYN")
    sampler = ADASYN(random_state=42)

    start_time = time.time()
    x_resampled, y_resampled = sampler.fit_resample(x, y)
    end_time = time.time()

    print(f"采样前类别分布: {np.bincount(y)}")
    print(f"采样后类别分布: {np.bincount(y_resampled)}")
    print(f"采样耗时: {end_time - start_time:.2f} 秒")

    return x_resampled, y_resampled


def select_11_features(x):
    """
    Restrict *x* to the 11 specific features used at training time.

    Features absent from *x* are reported (with close-name suggestions)
    rather than raising, so a partially matching frame still works.

    Args:
        x: DataFrame of engineered/encoded features.

    Returns:
        Tuple (x_selected, available_features): the selected sub-frame and
        the list of feature names actually found, in training order.

    Raises:
        ValueError: if none of the 11 features exist in *x*.
    """
    # The 11 features the model was trained on, in training order.
    selected_features = [
        'EnvironmentSatisfaction',
        'JobInvolvement',
        'JobLevel',
        'MaritalStatus',  # some data sources spell this 'Martialstatus'
        'OverTime',
        'RelationshipSatisfaction',
        'StockOptionLevel',
        'YearsInCurrentRole',
        'YearsSinceLastPromotion',
        'commute_stress',
        'stagnant_career'
    ]

    # Partition into features present in the frame and features missing.
    available_features = [f for f in selected_features if f in x.columns]
    missing_features = [f for f in selected_features if f not in x.columns]

    if missing_features:
        print(f"警告: 以下特征在数据中缺失: {missing_features}")
        print("尝试查找相似的列名...")

        # Suggest columns whose names contain the missing feature name.
        all_columns = x.columns.tolist()
        for name in missing_features:
            candidates = [c for c in all_columns if name.lower() in c.lower()]
            if candidates:
                print(f"  '{name}' 可能对应: {candidates}")

    if not available_features:
        raise ValueError("没有可用的特征，特征选择失败")

    print(f"从 {x.shape[1]} 个特征中选择 {len(available_features)} 个重要特征")
    x_selected = x[available_features]
    print(f"特征选择后形状: {x_selected.shape}")

    return x_selected, available_features


if __name__ == '__main__':
    # Load the test data.
    data = pd.read_csv('../data/test.csv')

    print("=" * 50)
    print("开始预测流程")
    print("=" * 50)
    print(f"原始数据形状: {data.shape}")

    # 1. Apply feature engineering identical to the training code.
    data_processed = advanced_feature_engineering(data)

    # 2. Apply the same encoding as the training code.
    #    NOTE(review): LabelEncoder is re-fit on the test data here, so the
    #    integer codes are not guaranteed to match the training-time codes —
    #    confirm against the training pipeline.
    x, y, label_encoder = encode_features(data_processed)

    # 3. Handle class imbalance (kept consistent with the training code).
    #    NOTE(review): oversampling the EVALUATION set with ADASYN means all
    #    metrics below are computed partly on synthetic samples, which
    #    typically inflates scores — verify this is intentional.
    x_resampled, y_resampled = handle_imbalance(x, y, 'adasyn')

    # 4. Select the 11 specific features used during training.
    x_selected, selected_features = select_11_features(x_resampled)
    print(f"使用的特征: {selected_features}")

    # 5. Load the model.
    model_path = os.path.abspath('../model/xgb_model_auc_0.9344.pkl')

    if os.path.exists(model_path):
        print(f"加载模型: {model_path}")
        model = joblib.load(model_path)
    else:
        print("模型文件不存在:", model_path)
        # Preferred model missing: fall back to any other .pkl in the dir.
        # NOTE(review): os.listdir order is arbitrary, so model_files[0] is
        # not necessarily the "latest" model despite the variable name.
        model_dir = '../model/'
        model_files = [f for f in os.listdir(model_dir) if f.endswith('.pkl')]
        if model_files:
            latest_model = os.path.join(model_dir, model_files[0])
            print(f"尝试加载其他模型: {latest_model}")
            model = joblib.load(latest_model)
        else:
            raise FileNotFoundError("没有找到任何模型文件")

    # 6. Run predictions.
    logger.info("=============开始测试==============")
    print("进行预测...")

    # Predicted probability of the positive class.
    y_pred_proba = model.predict_proba(x_selected)[:, 1]

    # Predicted class labels.
    y_pred = model.predict(x_selected)

    # 7. Evaluate the model (on the resampled data — see NOTE at step 3).
    auc_score = roc_auc_score(y_resampled, y_pred_proba)
    accuracy = np.mean(y_pred == y_resampled)

    print("\n" + "=" * 50)
    print("预测结果评估")
    print("=" * 50)
    print(f"ROC AUC分数: {auc_score:.4f}")
    print(f"准确率: {accuracy:.4f}")

    # Detailed per-class classification report.
    print("\n分类报告:")
    print(classification_report(y_resampled, y_pred))

    print("\n混淆矩阵:")
    print(confusion_matrix(y_resampled, y_pred))

    logger.info("=============测试结束==============")

    # 8. Save the prediction results.
    results_df = pd.DataFrame({
        '真实标签': y_resampled,
        '预测标签': y_pred,
        '预测概率': y_pred_proba
    })

    # Append the feature values that were fed to the model, column by column.
    for i, feature in enumerate(selected_features):
        results_df[feature] = x_selected.iloc[:, i].values

    # Write the results CSV (creating ../results/ if needed).
    output_path = '../results/predictions.csv'
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    results_df.to_csv(output_path, index=False)
    print(f"\n预测结果已保存至: {output_path}")

    # 9. Analyze the predictions.
    print("\n" + "=" * 50)
    print("预测结果分析")
    print("=" * 50)

    # Count how many samples were predicted for each class.
    unique, counts = np.unique(y_pred, return_counts=True)
    prediction_dist = dict(zip(unique, counts))

    print(f"预测类别分布: {prediction_dist}")
    print(f"离职预测比例: {np.mean(y_pred):.2%}")

    # Flag high attrition-risk employees (predicted probability > 0.7).
    high_risk_indices = y_pred_proba > 0.7
    high_risk_count = np.sum(high_risk_indices)

    print(f"高离职风险员工数量 (概率 > 0.7): {high_risk_count} ({high_risk_count / len(y_pred):.2%})")

    if auc_score >= 0.75:
        print("🎉 模型性能达到目标要求 (AUC >= 0.75)")
    else:
        print("⚠️ 模型性能未达到目标要求")