import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, classification_report, roc_auc_score
from sklearn.tree import plot_tree
import joblib
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from src.utils.common import data_processing
from src.utils.log import Logger
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from src.utils.common import data_processing
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
import lightgbm as lgb
from src.features.FeatureEngineering import feature_extra,EmployeeAttritionFeatureEngineer
from imblearn.over_sampling import SMOTE
import os

# Cap the worker-process count used by joblib/loky (e.g. during GridSearchCV
# with n_jobs=-1) to avoid CPU oversubscription on this machine.
os.environ["LOKY_MAX_CPU_COUNT"] = "8"


class TrainModel(object):
    """Holds a timestamped file logger used by the training entry points."""

    def __init__(self):
        """Create a logger whose filename embeds the current timestamp."""
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../../', 'train_' + stamp).get_logger()


# Logistic regression
def logistic_model(x, y):
    """Train a logistic-regression classifier with SMOTE oversampling and a
    grid search over C/solver, then persist the fitted search object.

    :param x: feature matrix (array-like)
    :param y: binary target vector
    """
    transfer = StandardScaler()
    x = transfer.fit_transform(x)

    # SMOTE is imported at module level (the redundant in-function import
    # was removed). Oversample the minority class to an 0.8 ratio.
    smote = SMOTE(random_state=13, sampling_strategy=0.8)
    x, y = smote.fit_resample(x, y)

    es = LogisticRegression(max_iter=10000, class_weight='balanced')

    scoring = {
        'AUC': 'roc_auc',
        'F1': 'f1',
        'Precision': 'precision',
        'Recall': 'recall'
    }
    param_grid = [
        {
            'C': np.logspace(-4, 2, 20),
            'solver': ['liblinear', 'saga']
        }
    ]
    # Multi-metric scoring requires an explicit refit metric.
    es = GridSearchCV(es, param_grid, scoring=scoring, cv=5, refit='AUC')
    es.fit(x, y)
    # NOTE(review): the metrics below are computed on the resampled training
    # data itself, so they overstate generalization performance — evaluate on
    # a held-out set for an honest estimate.
    x_pre = es.predict(x)
    x_predict = es.predict_proba(x)[:, 1]
    print(f'准确率：{accuracy_score(y, x_pre)}')
    print(f'分类报告：{classification_report(y, x_pre)}')
    print(f'ROC:{roc_auc_score(y, x_predict)}')
    print(f'最优超参：{es.best_params_}')

    joblib.dump(es, "../../model/logistic_model.pkl")


# Decision tree
def decisiontree_model(x, y):
    """Fit a shallow decision tree on standardized features, report
    training-set metrics, and persist the model.

    :param x: feature matrix (array-like)
    :param y: binary target vector
    """
    transfer = StandardScaler()
    x = transfer.fit_transform(x)

    es = DecisionTreeClassifier(max_depth=2, random_state=1)
    es.fit(x, y)
    x_pre = es.predict(x)
    x_predict = es.predict_proba(x)[:, 1]
    # BUG FIX: the metrics must compare predictions against the labels `y`;
    # the original passed the feature matrix `x` as ground truth, which
    # raises / produces nonsense.
    print(f'准确率：{accuracy_score(y, x_pre)}')
    print(f'分类报告：{classification_report(y, x_pre)}')
    print(f'ROC:{roc_auc_score(y, x_predict)}')
    joblib.dump(es, "../../model/decisiontree_model.pkl")
    # plt.figure(figsize=(50, 30))
    # plot_tree(es, filled=True, max_depth=10)
    # plt.show()


# Random forest
def randomforest_model(x, y):
    """Train a random forest on a stratified 70/30 split, report held-out
    metrics, persist the model, and plot the first tree of the ensemble.

    :param x: feature matrix (array-like)
    :param y: binary target vector
    """
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=24, stratify=y)

    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # Hyperparameters fixed from an earlier grid search
    # (n_estimators 1..200, max_depth 1..10, random_state=24).
    es = RandomForestClassifier(max_depth=8, n_estimators=187, random_state=24)
    es.fit(x_train, y_train)
    y_pre = es.predict(x_test)
    y_predict = es.predict_proba(x_test)[:, 1]
    print(f'准确率：{accuracy_score(y_test, y_pre)}')
    print(f'分类报告：{classification_report(y_test, y_pre)}')
    print(f'ROC:{roc_auc_score(y_test, y_predict)}')  # 0.7420

    # BUG FIX: `es` is a plain RandomForestClassifier (the GridSearchCV
    # wrapper is commented out), so it has no `best_estimator_` attribute —
    # the original dump/plot lines raised AttributeError at runtime.
    joblib.dump(es, "../../model/random_forest.pkl")
    plt.figure(figsize=(50, 30))
    plot_tree(es.estimators_[0], filled=True, max_depth=10)
    plt.savefig("../../data/fig/random_tree.png")
    plt.show()


# GBDT
def gradientboosting_model(x, y):
    """Fit a GradientBoostingClassifier (default hyperparameters) on
    standardized features and report training-set metrics.

    NOTE(review): metrics are computed on the training data itself, so they
    overstate generalization performance.

    :param x: feature matrix (array-like)
    :param y: binary target vector
    """
    transfer = StandardScaler()
    x = transfer.fit_transform(x)

    # Earlier grid-search findings (kept for reference; the unused live
    # `param` dict was removed along with the commented-out GridSearchCV):
    #   n_estimators 1-50:   learning_rate=0.01, max_depth=6, n_estimators=46  -> 66.95
    #   n_estimators 51-101: learning_rate=0.01, max_depth=7, n_estimators=51
    es = GradientBoostingClassifier()
    es.fit(x, y)
    y_pre = es.predict(x)
    y_predict = es.predict_proba(x)[:, 1]
    print(f'准确率：{accuracy_score(y, y_pre)}')
    print(f'分类报告：{classification_report(y, y_pre)}')
    print(f'ROC:{roc_auc_score(y, y_predict)}')  # 0.7139


# XGBoost
def xgboost_model(x, y):
    """Fit a fixed-hyperparameter XGBoost classifier on standardized features,
    report training-set metrics, and persist the model.

    :param x: feature matrix (array-like)
    :param y: binary target vector
    """
    features = StandardScaler().fit_transform(x)

    # Hyperparameters were chosen from a previous search; no CV here.
    model = xgb.XGBClassifier(learning_rate=0.01, max_depth=3, n_estimators=150, random_state=26)
    model.fit(features, y)

    labels_hat = model.predict(features)
    scores_hat = model.predict_proba(features)[:, 1]
    print(f'准确率：{accuracy_score(y, labels_hat)}')
    print(f'分类报告：{classification_report(y, labels_hat)}')
    print(f'ROC:{roc_auc_score(y, scores_hat)}')

    joblib.dump(model, "../../model/xgboost.pkl")


def logic_model_train(data, logger=None):
    """
    Train a logistic-regression pipeline (scaler + classifier) with grid search.

    :param data: feature-engineered DataFrame containing an 'Attrition' target
                 column; column 0 is assumed to be an ID column
    :param logger: optional logger
    :return: the best fitted Pipeline
    """
    # 1. Data preparation.
    # BUG FIX: also drop the target column from the features — if 'Attrition'
    # is not the first column, `iloc[:, 1:]` would leak it into x.
    # `errors='ignore'` keeps this a no-op when it already was column 0.
    x = data.iloc[:, 1:].drop(columns=['Attrition'], errors='ignore')
    y = data['Attrition']

    # Stratified split preserves the class ratio in both partitions.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.3, random_state=103, stratify=y
    )

    # 2. Pipeline: the scaler is fit on training folds only inside CV,
    # preventing test-fold leakage.
    pipeline = Pipeline([
        ('scaler', StandardScaler()),
        ('classifier', LogisticRegression(random_state=59))
    ])

    param_grid = [
        # Group 1: l1/l2 penalties with liblinear.
        {'classifier__max_iter': [700, 900, 1100],
         'classifier__penalty': ['l1', 'l2'],
         'classifier__solver': ['liblinear'],
         'classifier__C': [0.01, 0.1, 1, 5, 10, 20],
         'classifier__class_weight': ['balanced', None]
         },
        # Group 2: elasticnet penalty requires the saga solver.
        {'classifier__max_iter': [700, 900, 1100],
         'classifier__penalty': ['elasticnet'],
         'classifier__solver': ['saga'],
         'classifier__C': [0.01, 0.1, 1, 10],
         'classifier__l1_ratio': [0.1, 0.5, 0.9],  # elasticnet mixing ratio
         'classifier__class_weight': ['balanced', None]
         }
    ]

    # 3. Grid search with 5-fold cross-validation, optimizing AUC.
    grid_search = GridSearchCV(
        estimator=pipeline,
        param_grid=param_grid,
        scoring='roc_auc',
        cv=5,
        n_jobs=-1,
        verbose=1
    )
    grid_search.fit(x_train, y_train)

    # 4. Evaluate the best model on the held-out test set.
    best_model = grid_search.best_estimator_

    y_pred = best_model.predict(x_test)
    y_proba = best_model.predict_proba(x_test)[:, 1]

    print("\n=== 测试集性能 ===")
    print(f"最佳参数: {grid_search.best_params_}")
    print(f"AUC: {roc_auc_score(y_test, y_proba):.4f}")
    print(f"准确率: {accuracy_score(y_test, y_pred):.4f}")
    print("分类报告:\n", classification_report(y_test, y_pred))
    print("混淆矩阵:\n", confusion_matrix(y_test, y_pred))

    # 5. Persist the model together with the feature names and the fitted
    # scaler so inference code can reproduce preprocessing.
    joblib.dump({
        'model': best_model,
        'feature_names': x_train.columns.tolist(),
        'scaler': best_model.named_steps['scaler']
    }, '../model/lr_model.pkl')
    if logger:
        # BUG FIX: the message said 训练集 (training set) but the AUC is
        # computed on the test set.
        logger.info(f"logic模型测试集 roc值为:{roc_auc_score(y_test, y_proba):.4f}")
    return best_model


# Train an attrition model with XGBClassifier inside a scaling pipeline.
def xg_model_train(data, logger=None):
    """
    Train an XGBoost pipeline (scaler + classifier) with grid search.

    :param data: feature-engineered DataFrame containing an 'Attrition' target
                 column; column 0 is assumed to be an ID column
    :param logger: optional logger
    """
    # BUG FIX: also drop the target column from the features — if 'Attrition'
    # is not the first column, `iloc[:, 1:]` would leak it into x.
    x = data.iloc[:, 1:].drop(columns=['Attrition'], errors='ignore')
    y = data['Attrition']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=103, stratify=y)
    # Pipeline: the scaler is fit on training folds only inside CV.
    pipeline = Pipeline([
        ('scaler', StandardScaler()),
        ('classifier', XGBClassifier(random_state=59))
    ])

    # NOTE(review): this grid is very large (~13k combinations x 5 folds);
    # consider RandomizedSearchCV if runtime becomes a problem.
    param_grid: dict = {
        'classifier__n_estimators': [139, 201],  # number of trees
        'classifier__max_depth': [3, 6, 9],  # max tree depth
        'classifier__learning_rate': [0.1, 0.3, 0.5],  # learning rate (eta)
        'classifier__subsample': [0.6, 0.8, 1.0],  # row subsample ratio
        'classifier__colsample_bytree': [0.4, 0.6, 0.8],  # feature subsample ratio
        'classifier__gamma': [0, 0.1, 0.2],  # min loss reduction to split
        'classifier__reg_alpha': [0.1, 1, 2],  # L1 regularization
        'classifier__reg_lambda': [5, 10, 15],  # L2 regularization
        'classifier__scale_pos_weight': [1, 5, 10],  # class-imbalance weight
    }
    grid_search = GridSearchCV(
        estimator=pipeline,
        param_grid=param_grid,
        scoring='roc_auc',
        cv=5,
        n_jobs=-1,
        verbose=1
    )
    grid_search.fit(x_train, y_train)
    best_model = grid_search.best_estimator_
    y_pred = best_model.predict(x_test)
    y_pred_proba = best_model.predict_proba(x_test)[:, 1]
    # BUG FIX: the header said 训练集集 (doubled character, and wrong set) —
    # the metrics below are computed on the held-out test set.
    print("\n=== 测试集性能 ===")
    print(f"最佳参数: {grid_search.best_params_}")
    print(f"AUC: {roc_auc_score(y_test, y_pred_proba):.4f}")
    print(f"准确率: {accuracy_score(y_test, y_pred):.4f}")
    print("分类报告:\n", classification_report(y_test, y_pred))

    # Persist the model plus the feature names and the fitted scaler.
    joblib.dump({
        'model': best_model,
        'feature_names': x_train.columns.tolist(),
        'scaler': best_model.named_steps['scaler']
    }, '../model/xgboost_model.pkl')

    if logger:
        # BUG FIX: log message mislabeled the test-set AUC as training-set.
        logger.info(f"xg模型测试集 roc值为:{roc_auc_score(y_test, y_pred_proba):.4f}\n"
                    f"最优参数为：\n{grid_search.best_params_}")


def train_model_lightgbm(logger):
    """Train a LightGBM classifier on extracted features and log its
    held-out accuracy and AUC.

    :param logger: logger used to record metrics and best parameters
    """
    x, y = feature_extra(1)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=11)

    # Oversample the minority class in the training split only.
    smote = SMOTE(random_state=11)  # sampling_strategy={0: 922, 1: 2305}
    x_train, y_train = smote.fit_resample(x_train, y_train)

    base_estimator = lgb.LGBMClassifier(
        boosting_type='gbdt',
        objective='binary',
        learning_rate=0.079,
        n_estimators=229,
        colsample_bytree=0.9,
        max_depth=-1,
        num_leaves=90,
        metric=['auc', 'binary_logloss'],
        random_state=11
    )
    # NOTE: the grid is empty, so GridSearchCV only cross-validates the
    # single fixed configuration above (best_params_ will be {}).
    search_space = {
    }
    es = GridSearchCV(base_estimator, param_grid=search_space, cv=6)
    es.fit(x_train, y_train)

    test_labels = es.predict(x_test)
    test_scores = es.predict_proba(x_test)[:, 1]

    print(f'准确率：{accuracy_score(y_test, test_labels)}')
    print(f'留存AUC值为：{roc_auc_score(y_test, test_scores)}')
    print(f'最优超参：{es.best_params_}')
    logger.info(f'准确率：{accuracy_score(y_test, test_labels)}, 留存AUC值为：{roc_auc_score(y_test, test_scores)}')
    logger.info(f'lightgbm最优超参组合：{es.best_params_}')

    joblib.dump(es, "../../model/lightgbm.pkl")

if __name__ == '__main__':
    # tm = TrainModel()
    # train_model_lightgbm(tm.logfile)

    # Load raw features/target, run feature engineering, then train the
    # currently selected model (logistic regression).
    features, target = data_processing(1)
    features = EmployeeAttritionFeatureEngineer().transform(features)
    logistic_model(features, target)
    # Other trainers can be enabled here as needed:
    # decisiontree_model(features, target)
    # randomforest_model(features, target)
    # gradientboosting_model(features, target)
    # xgboost_model(features, target)
    # pm = feature_engineering.feature_engineering2('../data/train.csv')
    # logic_model_train(pm)
    # xg_model_train(pm)
