
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from Mr_Zhong.utils.log import Logger
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, classification_report
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.utils.class_weight import compute_sample_weight
from xgboost import XGBClassifier
import seaborn as sns
import joblib

# Use the SimHei font so the Chinese axis labels / titles below render
# correctly; re-enable the ASCII minus sign, which SimHei cannot display.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


# =================== 日志 & 数据加载类 ===================
class PowerLoadModel(object):
    """Bundle the raw training data with a timestamped training logger."""

    def __init__(self, filename):
        # Log file name carries a second-resolution timestamp,
        # e.g. "train_20240101120000".
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', 'train_' + stamp).get_logger()
        # Raw CSV is kept on the instance; downstream steps work on copies.
        self.data_source = pd.read_csv(filename, encoding='utf-8')


# =================== 数据分析函数 ===================
def ana_data(data):
    """Print a quick overview of the data set and save two bar charts
    (missing values per column, unique values per column) under ../result.

    Parameters
    ----------
    data : pandas.DataFrame
        The raw training data.
    """
    # Bug fix: the original f-string interpolated `data.info` (the bound
    # method object) instead of calling it; DataFrame.info() prints its
    # summary directly to stdout.
    print('数据整体情况:')
    data.info()
    print(f'数据前五列:\n{data.head()}')
    print(f'数据列表名字:\n{data.columns}')

    # Make sure the output directory exists before saving any figure.
    os.makedirs('../result', exist_ok=True)

    # Missing-value counts per column (charted as "异常点" below).
    null_counts = data.isnull().sum()
    plt.figure(figsize=(10, 6))
    plt.bar(null_counts.index, null_counts.values)
    plt.xticks(rotation=90)
    plt.ylabel('异常点数量')
    plt.title('数据集中各列的异常点数量')
    plt.tight_layout()
    plt.savefig('../result/各个特征的异常点数量.png')

    # Cardinality (number of distinct values) per column, annotated on bars.
    unique_counts = data.nunique()
    plt.figure(figsize=(12, 6))
    bars = plt.bar(unique_counts.index, unique_counts.values, color='skyblue')
    for bar in bars:
        yval = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2.0, yval + 0.5, int(yval), ha='center', va='bottom', fontsize=10)
    plt.xticks(rotation=90)
    plt.ylabel('唯一值数量')
    plt.title('数据集中各特征的类别数量', fontsize=14)
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.tight_layout()
    plt.savefig('../result/各个特征的种类数量.png')
    plt.show()


# =================== 特征工程 ===================
def feature_engineering(data, logger):
    """Select the model features, integer-encode the categorical columns,
    and split out the target.

    Parameters
    ----------
    data : pandas.DataFrame
        Raw data; the target is assumed to be its first column.
    logger : logging.Logger
        Logger for progress messages.

    Returns
    -------
    (X_train, Y_train) : (pandas.DataFrame, pandas.Series)
        Encoded feature matrix and the target series.
    """
    logger.info('===============开始进行特征工程处理===============')
    result = data.copy(deep=True)

    # Feature columns used by the model, in a fixed order.
    feature_cols = ['Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education',
                    'EducationField', 'EnvironmentSatisfaction', 'Gender', 'JobInvolvement', 'JobLevel',
                    'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked',
                    'OverTime', 'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction',
                    'StockOptionLevel', 'TotalWorkingYears', 'TrainingTimesLastYear',
                    'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion',
                    'YearsWithCurrManager']
    # Bug fix: .copy() makes X_train an independent frame so the in-place
    # column assignments below do not hit pandas' SettingWithCopyWarning
    # (writing through a slice of `result` is not guaranteed to stick).
    X_train = result.loc[:, feature_cols].copy()
    # Target: first column of the raw data (positional).
    Y_train = result.iloc[:, 0]

    # Ordinal / label encodings for the categorical columns.
    mappings = {
        'BusinessTravel': {'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2},
        'Department': {'Human Resources': 1, 'Research & Development': 2, 'Sales': 3},
        'EducationField': {
            'Life Sciences': 1,
            'Medical': 2,
            'Marketing': 3,
            'Technical Degree': 4,
            'Other': 5,
            'Human Resources': 6
        },
        'Gender': {'Male': 1, 'Female': 0},
        'JobRole': {
            'Sales Executive': 1,
            'Research Scientist': 2,
            'Laboratory Technician': 3,
            'Manufacturing Director': 4,
            'Healthcare Representative': 5,
            'Manager': 6,
            'Sales Representative': 7,
            'Research Director': 8,
            'Human Resources': 9
        },
        'MaritalStatus': {'Divorced': 0, 'Single': 1, 'Married': 2},
        'OverTime': {'No': 0, 'Yes': 1}
    }

    # NOTE: Series.map yields NaN for any category absent from its mapping,
    # so unseen labels surface as missing values rather than raising.
    for col, mapping in mappings.items():
        X_train[col] = X_train[col].map(mapping)

    logger.info("===============数据特征化结束===================")
    return X_train, Y_train


# =================== 网格搜索训练 ===================
def model_CV_train(X_train, Y_train, logger, param_dict):
    """Grid-search a RF + XGBoost stacking classifier and report the best
    parameter combination found.

    Parameters
    ----------
    X_train, Y_train : pandas.DataFrame / pandas.Series
        Full feature matrix and target (split internally 80/20).
    logger : logging.Logger
        Logger for progress messages.
    param_dict : dict
        Search space with keys 'n_estimators', 'max_depth', 'learning'.

    Returns
    -------
    dict with 'best_params' and 'best_score' (mean CV AUC).
    """
    logger.info("=========开始交叉网格处理===================")

    # Hold out 20% as a stratified test set; only the training part is
    # searched over.
    x_train, x_test, y_train, y_test = train_test_split(
        X_train, Y_train, test_size=0.2, random_state=6, stratify=Y_train
    )
    # Balanced per-sample weights to counter class imbalance during fitting.
    cls_weight = compute_sample_weight('balanced', y_train)

    # Stacked ensemble: RF + XGB base learners, logistic-regression meta
    # learner, with the raw features passed through to the meta level.
    stacking_clf = StackingClassifier(
        estimators=[
            ('rf', RandomForestClassifier(random_state=6)),
            ('xgb', XGBClassifier(
                use_label_encoder=False, eval_metric='logloss', verbosity=0, random_state=6
            )),
        ],
        final_estimator=LogisticRegression(),
        cv=5,
        passthrough=True
    )

    # Both base learners share the n_estimators / max_depth grids; only XGB
    # additionally searches the learning rate.
    shared = {'n_estimators': param_dict['n_estimators'], 'max_depth': param_dict['max_depth']}
    params = {f'rf__{k}': v for k, v in shared.items()}
    params.update({f'xgb__{k}': v for k, v in shared.items()})
    params['xgb__learning_rate'] = param_dict['learning']

    grid_cv = GridSearchCV(
        estimator=stacking_clf,
        param_grid=params,
        scoring='roc_auc',
        cv=5,
        verbose=1,
        n_jobs=-1
    )

    # sample_weight is forwarded to the stacking classifier's fit.
    grid_cv.fit(x_train, y_train, sample_weight=cls_weight)

    best_params = grid_cv.best_params_
    best_score = grid_cv.best_score_

    print(f"最优参数组合: {best_params}")
    print(f"最佳 AUC 分数: {best_score:.5f}")

    logger.info(f"最优参数组合: {best_params}")
    logger.info(f"最佳 AUC 分数: {best_score:.5f}")
    logger.info("=========交叉网格处理结束===================")

    return {
        'best_params': best_params,
        'best_score': best_score
    }


# =================== 最终模型训练 ===================
def model_train(X_train, Y_train, logger, n_estimators, max_depth, n_estimators1, max_depth1,
                learning_rate=0.1):
    """Train the final RF + XGBoost stacking classifier with the tuned
    hyper-parameters, evaluate it on a held-out split, persist the model,
    and plot base-learner feature importances.

    Parameters
    ----------
    X_train, Y_train : pandas.DataFrame / pandas.Series
        Full feature matrix and target (split internally 80/20).
    logger : logging.Logger
        Logger for progress messages.
    n_estimators, max_depth : int
        Random-forest hyper-parameters.
    n_estimators1, max_depth1 : int
        XGBoost hyper-parameters.
    learning_rate : float, optional
        XGBoost learning rate (new, defaults to the previously hard-coded
        0.1 so existing callers are unaffected).
    """
    logger.info("=========开始训练模型===================")
    x_train, x_test, y_train, y_test = train_test_split(
        X_train, Y_train, test_size=0.2, random_state=6, stratify=Y_train
    )
    cls_weight = compute_sample_weight('balanced', y_train)

    base_models = [
        ('rf', RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, random_state=6)),
        ('xgb', XGBClassifier(
            n_estimators=n_estimators1,
            max_depth=max_depth1,
            learning_rate=learning_rate,
            use_label_encoder=False,
            eval_metric='logloss',
            random_state=6
        )),
    ]

    meta_model = LogisticRegression()
    stacking_clf = StackingClassifier(
        estimators=base_models,
        final_estimator=meta_model,
        cv=5,
        passthrough=True
    )

    # Bug fix: the balanced sample weights were computed but never passed to
    # fit, so class imbalance was silently ignored during training.
    stacking_clf.fit(x_train, y_train, sample_weight=cls_weight)

    y_pred_proba = stacking_clf.predict_proba(x_test)[:, 1]
    y_pred = stacking_clf.predict(x_test)

    # All point metrics are reported for the class labelled 0; previously
    # f1_score used its default pos_label=1, so the reported F1 did not
    # match the reported precision/recall.
    auc_score = roc_auc_score(y_test, y_pred_proba)
    acc = accuracy_score(y_test, y_pred)
    prec = precision_score(y_test, y_pred, pos_label=0)
    recall = recall_score(y_test, y_pred, pos_label=0)
    f1 = f1_score(y_test, y_pred, pos_label=0)

    print(f"模型在测试集上的AUC：{auc_score:.5f}")
    print(f"准确率(accuracy): {acc:.5f}")
    print(f"精确率(precision): {prec:.5f}")
    print(f"召回率(recall): {recall:.5f}")
    print(f"F1分数: {f1:.5f}")
    print(f"分类评估报告:\n{classification_report(y_test, y_pred)}")

    logger.info(f"模型在测试集上的AUC：{auc_score:.5f}")
    logger.info(f"准确率(accuracy): {acc:.5f}")
    logger.info(f"精确率(precision): {prec:.5f}")
    logger.info(f"召回率(recall): {recall:.5f}")
    logger.info(f"F1分数: {f1:.5f}")

    # Ensure output directories exist before writing artifacts.
    os.makedirs('../model', exist_ok=True)
    os.makedirs('../result', exist_ok=True)

    joblib.dump(stacking_clf, '../model/stacking_model.pkl')
    logger.info("模型已保存至 ../model/stacking_model.pkl")

    # Feature importances come from whichever fitted base learner exposes
    # them (XGB preferred, then RF); fall back to zeros so plotting never
    # breaks the training run.
    try:
        if hasattr(stacking_clf.named_estimators_['xgb'], 'feature_importances_'):
            importances = stacking_clf.named_estimators_['xgb'].feature_importances_
        elif hasattr(stacking_clf.named_estimators_['rf'], 'feature_importances_'):
            importances = stacking_clf.named_estimators_['rf'].feature_importances_
        else:
            importances = np.zeros(len(x_train.columns))
    except Exception as e:
        importances = np.zeros(len(x_train.columns))
        logger.warning(f"无法获取特征重要性: {e}")

    feature_importance_df = pd.DataFrame({
        'Feature': x_train.columns,
        'Importance': importances
    }).sort_values(by='Importance', ascending=False)

    plt.figure(figsize=(12, 8))
    sns.barplot(x='Importance', y='Feature', data=feature_importance_df, palette="viridis")
    plt.title('Stacking 模型特征重要性（取自 XGBoost 或 RF）')
    plt.tight_layout()
    plt.savefig('../result/模型的特征权重.png')
    logger.info("=========模型训练完成===================")


# =================== 主程序入口 ===================
if __name__ == '__main__':
    # Hyper-parameter search space for the grid search.
    # NOTE(review): this grid is enormous (49 x 18 values per base learner
    # plus 4 learning rates, all crossed) — expect a very long search.
    param_dict = {
        'n_estimators': list(range(10, 500, 10)),
        'max_depth': list(range(2, 20)),
        'learning': [0.01, 0.05, 0.1, 0.15]
    }

    input_file = os.path.join('../data', 'train.csv')
    model = PowerLoadModel(input_file)
    X_train, Y_train = feature_engineering(model.data_source, model.logfile)

    # Tune the stacked model, then retrain it with the winning parameters.
    best_result = model_CV_train(X_train, Y_train, model.logfile, param_dict)
    best_params = best_result['best_params']

    # NOTE(review): the tuned XGBoost learning rate is extracted here but
    # never forwarded — model_train hard-codes learning_rate=0.1. Confirm
    # whether it should be passed through.
    xgb_learning_rate = best_params.get('xgb__learning_rate', 0.1)

    model_train(X_train, Y_train, model.logfile,
                n_estimators=best_params['rf__n_estimators'],
                max_depth=best_params['rf__max_depth'],
                n_estimators1=best_params['xgb__n_estimators'],
                max_depth1=best_params['xgb__max_depth'])