import datetime
import warnings

import joblib
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
import numpy as np

from src.zzm.config import config
from src.zzm.feature.feature import feature_proprecessing
from src.zzm.utils.common import data_preprocessing, plt_fig
from src.zzm.utils.log import Logger
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier

warnings.filterwarnings('ignore', category=UserWarning)

# SimHei font so matplotlib can render the Chinese text used in plots/labels.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15


class BrainPredictModel:
    """Holds the preprocessed training data and a dated logger for one run.

    :param path: path to the raw CSV file to load and preprocess
    :param flag: prefix for the log file name (e.g. 'train_')
    """

    def __init__(self, path, flag):
        # Log file name = flag prefix + today's date, e.g. "train_20240101".
        today = datetime.datetime.now().strftime('%Y%m%d')
        self.log = Logger('../', flag + today).get_logger()

        # Raw CSV is cleaned by the shared preprocessing helper.
        self.data = data_preprocessing(path)


def model_train_random_forest(data, logger):
    """Train a fixed-hyperparameter random forest on the attrition data.

    Scales the configured feature columns, fits a RandomForestClassifier,
    logs the held-out (test) AUC, and persists model + scaler to ../model/.

    :param data: preprocessed DataFrame containing an 'Attrition' label column
    :param logger: logger used to record evaluation metrics
    """
    # 1. Feature preprocessing (encoding/derivation done by the shared helper).
    train_data = feature_proprecessing(data)

    # 2. Separate the label; keep only the configured feature columns.
    y = train_data.pop('Attrition')
    x = train_data[config.FEATURE_NAMES]

    # 3. Hold out 20% of rows for evaluation.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=25)

    # 4. Standardize features (zero mean, unit variance). Tree ensembles are
    # scale-insensitive, but the fitted scaler is also saved for inference.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # 5. Train the forest. (BUGFIX: removed a misleading "random search" print —
    # no hyperparameter search is performed in this variant.)
    es = RandomForestClassifier(n_estimators=500, random_state=52, max_depth=7)
    es.fit(x_train, y_train)

    # AUC must be computed from predict_proba (class-1 probabilities), not from
    # hard predict() labels — see roc_auc_score documentation.
    y_pred = es.predict_proba(x_test)[:, 1]

    # 6. Evaluate. BUGFIX: this score is computed on the *test* split, so the
    # log label now says 测试集 instead of 训练集.
    logger.info(f'测试集AUC:{roc_auc_score(y_test, y_pred)}')

    # 7. Persist model and scaler together so inference scales identically.
    joblib.dump(es, '../model/model_zzm.pkl')
    joblib.dump(scaler, '../model/scaler_zzm.pkl')


def model_train_random_forest02(data, logger):
    """Tune a random forest with RandomizedSearchCV and persist the best model.

    Scales the configured feature columns, runs a 50-iteration random search
    over forest hyperparameters, logs the best CV score and the held-out
    (test) AUC, and persists the best estimator + scaler to ../model/.

    :param data: preprocessed DataFrame containing an 'Attrition' label column
    :param logger: logger used to record search results and metrics
    """
    # 1. Feature preprocessing (encoding/derivation done by the shared helper).
    train_data = feature_proprecessing(data)

    # 2. Separate the label; keep only the configured feature columns.
    y = train_data.pop('Attrition')
    x = train_data[config.FEATURE_NAMES]

    # 3. Hold out 20% of rows for evaluation.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=25)

    # 4. Standardize features (zero mean, unit variance). Tree ensembles are
    # scale-insensitive, but the fitted scaler is also saved for inference.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # 5. Hyperparameter search space for the forest.
    base_model = RandomForestClassifier()
    param_dist = {
        'n_estimators': np.arange(50, 501, 50),
        'max_depth': [None] + list(np.arange(5, 31, 5)),
        'min_samples_split': np.arange(2, 21, 2),
        'min_samples_leaf': np.arange(1, 11, 1),
        'max_features': ['sqrt', 'log2'] + list(np.linspace(0.1, 1.0, 10))
    }

    # Random search over 50 candidate combinations with 5-fold CV.
    # BUGFIX: scoring changed from 'accuracy' to 'roc_auc' so the tuning
    # objective matches the AUC metric used for final evaluation below.
    es = RandomizedSearchCV(
        estimator=base_model,
        param_distributions=param_dist,
        n_iter=50,
        cv=5,
        n_jobs=-1,
        scoring='roc_auc',
        random_state=52,
        verbose=1
    )

    print("\n开始随机搜索最佳参数...")
    es.fit(x_train, y_train)

    # AUC must be computed from predict_proba (class-1 probabilities), not from
    # hard predict() labels — see roc_auc_score documentation.
    y_pred = es.predict_proba(x_test)[:, 1]

    # 6. Evaluate. BUGFIX: this score is computed on the *test* split, so the
    # log label now says 测试集 instead of 训练集.
    logger.info(f'最优参数:{es.best_params_},最优得分:{es.best_score_}')
    logger.info(f'测试集AUC:{roc_auc_score(y_test, y_pred)}')

    # 7. Persist only the refit best estimator (lighter than the whole search
    # object) together with the scaler so inference scales identically.
    joblib.dump(es.best_estimator_, '../model/model_zzm.pkl')
    joblib.dump(scaler, '../model/scaler_zzm.pkl')


def model_train_random_forest03(data, logger):
    """Train a random forest with hyperparameters found by a prior search.

    Uses fixed hyperparameters (derived from an earlier RandomizedSearchCV
    run; see model_train_random_forest02), logs the held-out (test) AUC,
    and persists model + scaler to ../model/.

    :param data: preprocessed DataFrame containing an 'Attrition' label column
    :param logger: logger used to record evaluation metrics
    """
    # 1. Feature preprocessing (encoding/derivation done by the shared helper).
    train_data = feature_proprecessing(data)

    # 2. Separate the label; keep only the configured feature columns.
    y = train_data.pop('Attrition')
    x = train_data[config.FEATURE_NAMES]

    # 3. Hold out 20% of rows for evaluation.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=25)

    # 4. Standardize features (zero mean, unit variance). Tree ensembles are
    # scale-insensitive, but the fitted scaler is also saved for inference.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # 5. Train with tuned hyperparameters. Earlier search suggested:
    # {'n_estimators': 100, 'min_samples_split': 6, 'min_samples_leaf': 2,
    #  'max_features': 'sqrt', 'max_depth': 20}
    # (BUGFIX: removed a misleading "random search" print — no search runs
    # here — and deleted the dead commented-out search code.)
    es = RandomForestClassifier(n_estimators=300,
                                min_samples_split=6,
                                min_samples_leaf=2,
                                max_features='sqrt',
                                max_depth=20,
                                random_state=2,
                                n_jobs=1,
                                criterion='entropy')
    es.fit(x_train, y_train)

    # AUC must be computed from predict_proba (class-1 probabilities), not from
    # hard predict() labels — see roc_auc_score documentation.
    y_pred = es.predict_proba(x_test)[:, 1]

    # 6. Evaluate. BUGFIX: this score is computed on the *test* split, so the
    # log label now says 测试集 instead of 训练集.
    logger.info(f'测试集AUC:{roc_auc_score(y_test, y_pred)}')

    # 7. Persist model and scaler together so inference scales identically.
    joblib.dump(es, '../model/model_zzm.pkl')
    joblib.dump(scaler, '../model/scaler_zzm.pkl')


def model_train_pipe_line(data, logger):
    """Train the final random-forest model, plot its ROC curve, and persist it.

    Scales the configured feature columns, fits a tuned RandomForestClassifier,
    logs/prints the held-out (test) AUC, draws the ROC curve via plt_fig, and
    persists model + scaler to ../model/.

    :param data: preprocessed DataFrame containing an 'Attrition' label column
    :param logger: logger used to record evaluation metrics
    """
    # 1. Feature preprocessing (encoding/derivation done by the shared helper).
    train_data = feature_proprecessing(data)

    # 2. Separate the label; keep only the configured feature columns.
    y = train_data.pop('Attrition')
    x = train_data[config.FEATURE_NAMES]

    # 3. Hold out 20% of rows for evaluation.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=25)

    # 4. Standardize features (zero mean, unit variance). Tree ensembles are
    # scale-insensitive, but the fitted scaler is also saved for inference.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # 5. Train the forest. (Dead commented-out stacking/LogisticRegression
    # experiment removed; see version history if it needs reviving.)
    model1 = RandomForestClassifier(n_estimators=500,
                                    min_samples_split=5,
                                    min_samples_leaf=1,
                                    max_features='log2',
                                    max_depth=20,
                                    random_state=22,
                                    n_jobs=-1)
    model1.fit(x_train, y_train)

    # AUC must be computed from predict_proba (class-1 probabilities), not from
    # hard predict() labels — see roc_auc_score documentation.
    y_pred = model1.predict_proba(x_test)[:, 1]

    # 6. Evaluate once and reuse the score (was computed twice before).
    # BUGFIX: the score is on the *test* split, so the label now says 测试集.
    auc_score = roc_auc_score(y_test, y_pred)
    logger.info(f'测试集AUC:{auc_score}')
    print(f'测试集AUC:{auc_score}')

    # 7. Draw the ROC curve.
    plt_fig(y_test, y_pred)

    # 8. Persist model and scaler together so inference scales identically.
    joblib.dump(model1, '../model/model1_zzm.pkl')
    joblib.dump(scaler, '../model/scaler_zzm.pkl')


if __name__ == '__main__':
    # Load and preprocess the raw training CSV; also creates a dated log file.
    pm = BrainPredictModel('../../../data/raw/train.csv', 'train_')
    # Alternative training entry points kept for experimentation:
    # feature_engineering(pm.data, pm.log)
    # model_train_random_forest(pm.data, pm.log)
    # model_train_random_forest02(pm.data, pm.log)
    # model_train_random_forest03(pm.data, pm.log)
    # Current pipeline: tuned random forest + ROC plot + model persistence.
    model_train_pipe_line(pm.data, pm.log)
