import os
import datetime
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from util.logUtil import Logger
from util.commonUtil import data_preprocessing
import joblib


# Analyse data: per-feature ROC-AUC against the Attrition label (with logging)
def analysis_data(data, logger):
    """Compute the ROC-AUC of each numeric feature against 'Attrition'.

    Args:
        data: DataFrame containing a binary 'Attrition' label column.
        logger: logger exposing .info / .warning.

    Returns:
        List of (column_name, auc_score) tuples for every numeric feature
        whose AUC could be computed; failures are logged and skipped.
    """
    logger.info("开始数据特征AUC分析...")
    results = []
    for col in data.columns:
        if col == 'Attrition':
            continue
        # Generalized from `dtypes == 'int64'`: that check silently skipped
        # float64/int32 features, which are equally valid AUC scores.
        if not pd.api.types.is_numeric_dtype(data[col]):
            continue
        try:
            auc_score = roc_auc_score(data['Attrition'], data[col])
            results.append((col, auc_score))
            logger.info(f"特征 {col} 的AUC值: {auc_score:.4f}")
        except Exception as e:
            # e.g. label not binary, or constant feature — log and continue
            logger.warning(f"特征 {col} 计算AUC失败: {str(e)}")
    return results


# Data processing / feature engineering (optimized)
def data_processing(data, logger):
    """Clean, encode, and standardize the raw training data.

    Steps: impute missing values, drop identifier columns, ordinal-map a few
    known categorical columns, one-hot encode the rest, standardize features
    and persist the fitted scaler.

    Args:
        data: raw DataFrame; must contain an 'Attrition' column.
        logger: logger exposing .info / .warning.

    Returns:
        (x_scaled, y, std): standardized feature matrix, label Series, and
        the fitted StandardScaler.
    """
    logger.info("=========开始进行特征工程===================")
    # BUGFIX: work on a copy so the caller's DataFrame is not mutated by the
    # in-place fillna / column assignments below.
    data = data.copy()

    # 1. Missing values: median for numeric columns, mode for categoricals.
    logger.info(f"处理前缺失值情况:\n{data.isnull().sum()[data.isnull().sum() > 0]}")
    num_cols = data.select_dtypes(include=['int64', 'float64']).columns
    cat_cols = data.select_dtypes(include=['object']).columns
    data[num_cols] = data[num_cols].fillna(data[num_cols].median())
    data[cat_cols] = data[cat_cols].fillna(data[cat_cols].mode().iloc[0])
    logger.info("缺失值处理完成")

    # 2. Drop non-predictive identifier columns when present.
    drop_cols = ['EmployeeID'] if 'EmployeeID' in data.columns else []
    if drop_cols:
        data = data.drop(columns=drop_cols)
        logger.info(f"删除无关特征: {drop_cols}")

    # 3. Shared ordinal mapping table (values across the four columns below
    #    are disjoint, so one dict serves them all).
    map_Bus = {
        'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2,
        'Human Resources': 1, 'Research & Development': 2, 'Sales': 3,
        'Divorced': 0, 'Single': 1, 'Married': 2,
        'Male': 0, 'Female': 1
    }

    # 4. Apply the mapping; warn about values that .map will turn into NaN.
    for col in ['MaritalStatus', 'BusinessTravel', 'Department', 'Gender']:
        if col in data.columns:
            unmapped = set(data[col].unique()) - set(map_Bus.keys())
            if unmapped:
                logger.warning(f"特征 {col} 存在未映射值: {unmapped}，将被转换为NaN")
            data[col] = data[col].map(map_Bus)

    # 5. One-hot encode the remaining categoricals.
    # BUGFIX: the previous `pd.get_dummies(data).astype(int)` cast the WHOLE
    # frame to int, truncating float features (e.g. median-imputed values).
    # `dtype=int` applies only to the generated dummy columns.
    data_dum = pd.get_dummies(data, dtype=int)
    logger.info(f"独热编码后特征数: {data_dum.shape[1]}")

    # 6. Feature / label split.
    x = data_dum.drop('Attrition', axis=1)
    y = data_dum['Attrition']

    # 7. Standardize and persist the fitted scaler for later inference.
    std = StandardScaler()
    x_scaled = std.fit_transform(x)
    model_dir = os.path.abspath('../model')
    os.makedirs(model_dir, exist_ok=True)  # BUGFIX: dir may not exist yet
    scaler_path = os.path.join(model_dir, 'scaler.pkl')
    joblib.dump(std, scaler_path)
    logger.info(f"标准化完成，标准化器保存至: {scaler_path}")

    return x_scaled, y, std


# Train the model (with hyper-parameter tuning)
def model_train(x_train, y_train, logger):
    """Tune, evaluate, and persist a logistic-regression classifier.

    Args:
        x_train: standardized feature matrix.
        y_train: binary label vector.
        logger: logger exposing .info.

    Returns:
        The best fitted LogisticRegression estimator (previously None;
        existing callers that ignore the return value are unaffected).
    """
    logger.info("开始模型训练与调优...")

    # 1. Hold out a validation split. BUGFIX: stratify so the validation set
    #    keeps the class ratio — attrition labels are typically imbalanced.
    x_tr, x_val, y_tr, y_val = train_test_split(
        x_train, y_train, test_size=0.2, random_state=42, stratify=y_train
    )
    logger.info(f"训练集形状: {x_tr.shape}, 验证集形状: {x_val.shape}")

    # 2. Grid search over regularization strength and penalty type.
    param_grid = {
        'C': [0.01, 0.1, 1, 10],
        'penalty': ['l1', 'l2'],
        'solver': ['liblinear']  # liblinear supports the l1 penalty
    }
    grid_search = GridSearchCV(
        LogisticRegression(max_iter=1000),
        param_grid,
        cv=5,
        scoring='roc_auc'
    )
    grid_search.fit(x_tr, y_tr)
    logger.info(f"最佳超参数: {grid_search.best_params_}，交叉验证AUC: {grid_search.best_score_:.4f}")

    # 3. Held-out evaluation of the refit best estimator.
    best_model = grid_search.best_estimator_
    y_val_pred = best_model.predict(x_val)
    val_auc = roc_auc_score(y_val, best_model.predict_proba(x_val)[:, 1])
    val_acc = accuracy_score(y_val, y_val_pred)
    logger.info(f"验证集性能 - AUC: {val_auc:.4f}, 准确率: {val_acc:.4f}")

    # 4. Persist the best model. BUGFIX: ensure the target dir exists so
    #    joblib.dump does not fail on a fresh checkout.
    model_dir = os.path.abspath('../model')
    os.makedirs(model_dir, exist_ok=True)
    model_path = os.path.join(model_dir, 'logistic_model.pkl')
    joblib.dump(best_model, model_path)
    logger.info(f"最佳模型保存至: {model_path}")

    return best_model


class PowerLoadModel(object):
    """Wires up a timestamped run logger and loads the training data source."""

    def __init__(self, filename):
        # Timestamped log file name, e.g. train_20240101120000.
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger(os.path.abspath('../'), "train_" + stamp).get_logger()
        # Load the data source; on failure, log with traceback and abort.
        try:
            self.data_source = data_preprocessing(filename)
            self.logfile.info(f"数据加载成功，形状: {self.data_source.shape}")
        except Exception as e:
            self.logfile.error(f"数据加载失败: {str(e)}", exc_info=True)
            raise  # re-raise to terminate the run


if __name__ == '__main__':
    # Entry point: load ../data/train.csv, log per-feature AUCs,
    # run feature engineering, then train and persist the model.
    input_file = os.path.join(os.path.abspath('../data'), 'train.csv')
    model = PowerLoadModel(input_file)
    # Per-feature AUC analysis is log-only here; the returned list is discarded.
    analysis_data(model.data_source, model.logfile)
    # Fitted scaler (third return value) is ignored — it is persisted to disk.
    x_train, y_train, _ = data_processing(model.data_source, model.logfile)
    model_train(x_train, y_train, model.logfile)