
import os
import numpy as np
import pandas as pd
import datetime
from sklearn.preprocessing import StandardScaler
from util.logUtil06 import Logger
from xgboost import XGBRegressor, XGBClassifier
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score, confusion_matrix, f1_score, \
    recall_score, precision_score
import joblib
import logging

# ==============================
# Logging configuration: pick the log file path
# ==============================
LOG_DIR = '../../log'
os.makedirs(LOG_DIR, exist_ok=True)  # create the log directory if it is missing
log_filename = os.path.join(
    LOG_DIR,
    f'train_{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}.log',
)

# If the project Logger accepts a file path it could be used directly;
# otherwise fall back to this minimal stdlib logging setup (recommended).
logger = logging.getLogger('HR_Attrition')
logger.setLevel(logging.INFO)

# Guard against attaching a duplicate handler when the module is re-imported.
if not logger.handlers:
    file_handler = logging.FileHandler(log_filename, encoding='utf-8')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    logger.addHandler(file_handler)

logger.info(f"日志将写入文件: {os.path.abspath(log_filename)}")

def feature_engineering():
    """Load the raw CSVs, drop constant/id columns, label-encode the
    categorical features, split train/validation, and write the three
    processed CSVs back to ../../data/.
    """
    logger.info("开始特征工程处理...")

    # 1. Load the raw data
    train = pd.read_csv('../../data/train.csv')
    test = pd.read_csv('../../data/test.csv')
    logger.info(f"原始训练集形状: {train.shape}")
    logger.info(f"原始测试集形状: {test.shape}")

    # 2. Drop columns that carry no predictive signal
    drop_cols = ['Over18', 'StandardHours', 'EmployeeNumber']
    train = train.drop(columns=drop_cols)
    test = test.drop(columns=drop_cols)
    logger.info(f"已删除无用列: {drop_cols}")

    # 3. Separate features from the target
    y = train['Attrition']
    X = train.drop('Attrition', axis=1)
    logger.info(f"标签值分布:\n{y.value_counts().sort_index()}")

    # 4. Ordinal-encode the categorical columns.  The mapping is fit on the
    #    training features only; categories unseen in training map to -1 on
    #    the test side.
    cat_cols = ['BusinessTravel', 'Department', 'EducationField',
                'Gender', 'JobRole', 'MaritalStatus', 'OverTime']
    logger.info(f"待编码类别列: {cat_cols}")

    for col in cat_cols:
        mapping = {val: idx for idx, val in enumerate(sorted(X[col].unique()))}
        X[col] = X[col].map(mapping).astype(int)
        test[col] = test[col].map(mapping).fillna(-1).astype(int)
        logger.debug(f"列 '{col}' 编码映射: {mapping}")

    logger.info("类别列编码完成")

    # 5. Stratified train/validation split for a stable label ratio
    X_train, X_val, y_train, y_val = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )
    logger.info(f"划分完成 - 训练集: {X_train.shape}, 验证集: {X_val.shape}")

    # 6. Persist the processed datasets
    os.makedirs('../../data', exist_ok=True)

    pd.concat([X_train, y_train], axis=1).to_csv('../../data/train_processed.csv', index=False)
    pd.concat([X_val, y_val], axis=1).to_csv('../../data/val_processed.csv', index=False)
    test.to_csv('../../data/test_processed.csv', index=False)

    logger.info("特征工程完成！")
    logger.info("输出文件:")
    logger.info("  - ../../data/train_processed.csv")
    logger.info("  - ../../data/val_processed.csv")
    logger.info("  - ../../data/test_processed.csv")


def model_train():
    """Train an XGBoost classifier on the processed training split, log
    accuracy / AUC / classification report on the validation split, and
    save the fitted model to ../../model/test_1107.pkl.

    Reads the CSVs produced by feature_engineering(); the last column of
    each file is the label.
    """
    logger.info("开始模型训练...")

    train = pd.read_csv('../../data/train_processed.csv')
    val = pd.read_csv('../../data/val_processed.csv')
    logger.info(f"加载训练集: {train.shape}, 验证集: {val.shape}")

    # Last column is the label in both processed files.
    x_train = train.iloc[:, :-1]
    y_train = train.iloc[:, -1]
    x_val = val.iloc[:, :-1]
    y_val = val.iloc[:, -1]
    logger.info(f"训练特征维度: {x_train.shape}, 验证特征维度: {x_val.shape}")

    model = XGBClassifier(
        max_depth=10,
        n_estimators=30,
        learning_rate=0.2,
        random_state=42,
        eval_metric='logloss'
    )
    model.fit(x_train, y_train)
    logger.info("XGBoost 模型训练完成")

    predict_1 = model.predict(x_val)
    acc = accuracy_score(y_val, predict_1)
    logger.info(f"模型准确率: {acc:.4f}")

    # AUC uses the positive-class probability, not the hard prediction.
    auc = roc_auc_score(y_val, model.predict_proba(x_val)[:, 1])
    logger.info(f"验证集 AUC: {auc:.4f}")

    report = classification_report(y_val, predict_1, output_dict=False)
    logger.info("分类报告:\n" + report)

    # BUG FIX: create the model directory before dumping — without this,
    # joblib.dump raises FileNotFoundError on a fresh checkout (the data
    # directory gets the same treatment in feature_engineering()).
    model_path = '../../model/test_1107.pkl'
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    joblib.dump(model, model_path)
    logger.info(f"模型已保存至: {os.path.abspath(model_path)}")
    logger.info(" 模型训练与保存完成！")

def model_evaluate():
    """Load the persisted model, score it on the processed test set, and
    log accuracy, AUC, precision, recall, F1 and a full classification
    report.

    NOTE(review): this treats the LAST column of test_processed.csv as the
    ground-truth label. Confirm the raw test.csv actually contains an
    'Attrition' column — otherwise an ordinary feature is being scored as
    the target here.
    """
    logger.info("开始模型评估...")

    # 1. Load the trained model, failing loudly if it was never saved
    model_path = '../../model/test_1107.pkl'
    if not os.path.exists(model_path):
        raise FileNotFoundError(f" 模型文件不存在: {os.path.abspath(model_path)}")
    model = joblib.load(model_path)
    logger.info("模型加载成功")

    # 2. Load the processed test set
    test_path = '../../data/test_processed.csv'
    test = pd.read_csv(test_path)
    logger.info(f"测试集总形状: {test.shape}")

    # 3. Split features from the label (last column taken as y)
    X_test, y_test = test.iloc[:, :-1], test.iloc[:, -1]

    logger.info(f"特征维度: {X_test.shape}, 标签样本数: {len(y_test)}")
    logger.info(f"标签值分布:\n{y_test.value_counts().sort_index()}")

    # 4. Predict hard labels and positive-class probabilities
    y_pred = model.predict(X_test)
    y_proba = model.predict_proba(X_test)[:, 1]

    # 5. Summary metrics
    metrics = {
        'Accuracy': accuracy_score(y_test, y_pred),
        'AUC': roc_auc_score(y_test, y_proba),
        'Precision': precision_score(y_test, y_pred),
        'Recall': recall_score(y_test, y_pred),
        'F1': f1_score(y_test, y_pred)
    }

    logger.info("测试集评估结果:")
    for name, value in metrics.items():
        logger.info(f"  - {name}: {value:.4f}")

    # 6. Detailed per-class breakdown
    logger.info("\n 分类报告:\n" + classification_report(y_test, y_pred))
    logger.info("模型评估完成！")


def model_train_test():
    """Grid-search XGBoost hyper-parameters with stratified 4-fold CV on
    the processed training split, evaluate the best estimator on the
    validation split, and persist it to ../../model/best_model.pkl.
    """
    # Load the processed splits (cross-validation runs on the train split).
    train = pd.read_csv('../../data/train_processed.csv')
    val = pd.read_csv('../../data/val_processed.csv')
    x_train = train.iloc[:, :-1]
    y_train = train.iloc[:, -1]
    x_val = val.iloc[:, :-1]
    y_val = val.iloc[:, -1]

    # Start the search from the previously-trained model's configuration.
    model1 = joblib.load('../../model/test_1107.pkl')

    param_dict = {
        'max_depth': [3, 5, 8, 10, 12],
        'n_estimators': [10, 30, 50, 80, 120, 150, 180],
        'learning_rate': [0.1, 0.2, 0.3, 0.5, 0.7, 1.0, 1.2]
    }

    # Stratified folds keep the label ratio per fold; shuffling removes any
    # ordering effect. BUG FIX: pin random_state (required for shuffle=True
    # reproducibility, consistent with the rest of the file).
    k_fold = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
    # Exhaustive grid search with cross-validation
    cv = GridSearchCV(model1, param_grid=param_dict, cv=k_fold)
    cv.fit(x_train, y_train)

    # Evaluate the refitted best estimator on the held-out validation split.
    # (The original redundantly re-read val_processed.csv and clobbered
    # x_train/y_train with values that were never used — removed.)
    best_model = cv.best_estimator_
    y_pred = best_model.predict(x_val)

    # BUG FIX: the original logged `best_model.get_params` without calling
    # it, printing a bound-method repr; log the tuned parameters instead.
    logger.info(f'最优参数:{cv.best_params_}')

    acc = accuracy_score(y_val, y_pred)
    logger.info(f"最佳模型在验证集上的准确率: {acc:.4f}")

    # BUG FIX: the original passed the AUC as a stray logging argument to an
    # f-string with no placeholder (the value never appeared and logging
    # raised a formatting error), and computed AUC from hard labels; use the
    # positive-class probability and interpolate the value.
    auc = roc_auc_score(y_val, best_model.predict_proba(x_val)[:, 1])
    logger.info(f'ruc值: {auc:.4f}')

    # Persist the best model (create the directory on a fresh checkout).
    os.makedirs('../../model', exist_ok=True)
    joblib.dump(best_model, '../../model/best_model.pkl')
    logger.info("网格搜索完成，最佳模型已保存！")


if __name__ == '__main__':
    try:
        # Feature processing
        # feature_engineering()
        # Train the model and predict on the validation split
        model_train()
        # Evaluate the trained model on the test set
        model_evaluate()
        # Cross-validation + grid search
        # model_train_test()
        logger.info("整个流程执行成功！🎉")
    except Exception as e:
        # logger.exception == logger.error(..., exc_info=True): logs the
        # message at ERROR with the traceback, then re-raises for the caller.
        logger.exception(f"程序执行出错: {e}")
        raise