import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler,OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.metrics import accuracy_score
import joblib
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import (accuracy_score, confusion_matrix, classification_report,precision_score, recall_score, f1_score)
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import os
import datetime
# Make the project root importable so that `util.logUtil` resolves when this
# script is executed directly (layout assumed: <root>/<src_dir>/<this file>,
# with <root>/util/logUtil.py — TODO confirm against the repository layout).
current_script_path = os.path.abspath(__file__)
src_dir = os.path.dirname(current_script_path)
root_path = os.path.dirname(src_dir)
if root_path not in sys.path:
    sys.path.append(root_path)
# Project-local logging wrapper; must come after the sys.path mutation above.
from util.logUtil import Logger


def setup_logger(root_path=r'E:\AAI\anzhuangbao\python\PYCharmanzhuang\pythonProject\0816\Brain drain'):
    """Create and return a timestamped file logger for this training run.

    Args:
        root_path: Directory the log file is written to.  Defaults to the
            original hard-coded project directory so existing callers are
            unaffected; new callers can point it elsewhere.

    Returns:
        The logger produced by the project's ``util.logUtil.Logger`` wrapper.
    """
    # Timestamped file name so successive runs never overwrite each other.
    logfile_name = f"model_training_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}"
    logger = Logger(root_path, logfile_name, level='info').get_logger()
    logger.info('=== 模型训练流程开始 ===')
    logger.info(f'日志文件: {logfile_name}.log')
    return logger


def load_data(logger,
              train_path=r'E:\AAI\anzhuangbao\python\PYCharmanzhuang\pythonProject\0816\Brain drain\data\train.csv',
              test_path=r'E:\AAI\anzhuangbao\python\PYCharmanzhuang\pythonProject\0816\Brain drain\data\test.csv'):
    """Load the train/test CSV files and log their basic shape information.

    Args:
        logger: Run logger used for progress and error messages.
        train_path: Path to the training CSV.  Defaults to the original
            hard-coded location so existing callers keep working.
        test_path: Path to the test CSV (same default behaviour).

    Returns:
        Tuple ``(train_data, test_data)`` of pandas DataFrames.

    Raises:
        FileNotFoundError: Re-raised after logging when either file is missing.
    """
    try:
        train_data = pd.read_csv(train_path)
        test_data = pd.read_csv(test_path)
        logger.info('数据已加载成功')
        logger.info(f'训练集形状：{train_data.shape}')
        logger.info(f'测试集形状：{test_data.shape}')
        logger.info(f'训练集列名：{list(train_data.columns)}')
        return train_data, test_data
    except FileNotFoundError as e:
        # Log the failure for the run record, then let the caller abort.
        logger.error(f'文件未找到：{e}')
        raise


def preprocess_data(train_data, test_data, logger):
    """Split features/target and build the preprocessing ColumnTransformer.

    Args:
        train_data: Training DataFrame containing an 'Attrition' target column.
        test_data: Test DataFrame with the same schema.
        logger: Run logger.

    Returns:
        Tuple ``(x_train, x_test, y_train, y_test, preprocessor)`` where
        ``preprocessor`` is an *unfitted* ColumnTransformer meant to be the
        first step of a model Pipeline.
    """
    y_train = train_data['Attrition']
    y_test = test_data['Attrition']
    x_train = train_data.drop('Attrition', axis=1)
    x_test = test_data.drop('Attrition', axis=1)
    logger.info(f'目标变量分布 - 训练集: {y_train.value_counts().to_dict()}')
    logger.info(f'目标变量分布 - 测试集: {y_test.value_counts().to_dict()}')

    # Bug fix: the original selected only 'int64' columns, so any float64 (or
    # platform-dependent int32) numeric column was silently DROPPED by the
    # ColumnTransformer (its default remainder='drop').  np.number covers all
    # numeric dtypes while still excluding object/categorical columns.
    numeric_features = x_train.select_dtypes(include=[np.number]).columns
    categorical_features = x_train.select_dtypes(include=['object']).columns
    logger.info(f'数值型特征（{len(numeric_features)}个）：{list(numeric_features)}')
    logger.info(f'分类型特征 ({len(categorical_features)}个): {list(categorical_features)}')

    # Numeric: standardize then expand with degree-2 polynomial interactions.
    numeric_transformer = Pipeline(steps=[
        ('scaler', StandardScaler()),
        ('poly', PolynomialFeatures(degree=2))
    ])
    # Categorical: one-hot encode; unseen test-time categories become all-zeros
    # instead of raising, thanks to handle_unknown='ignore'.
    categorical_transformer = Pipeline(steps=[
        ('onehot', OneHotEncoder(handle_unknown='ignore'))
    ])
    preprocessor = ColumnTransformer(transformers=[
        ('num', numeric_transformer, numeric_features),
        ('cat', categorical_transformer, categorical_features)
    ])
    logger.info('数据预处理和特征工程已完成')
    return x_train, x_test, y_train, y_test, preprocessor


def build_and_tune_model(x_train, y_train, preprocessor, logger, cv=3, n_jobs=1):
    """Grid-search two candidate classifiers and return the best pipeline.

    Candidates are RandomForest and GradientBoosting, each wrapped in a
    Pipeline behind the shared ``preprocessor``.  The winner is the one with
    the highest mean cross-validated accuracy.

    Args:
        x_train: Training features.
        y_train: Training target.
        preprocessor: Unfitted ColumnTransformer used as the first pipeline step.
        logger: Run logger.
        cv: Number of cross-validation folds (default 3, as before).
        n_jobs: Parallelism passed to GridSearchCV (default 1, as before).

    Returns:
        The refit best estimator (a fitted Pipeline), or None if no candidate
        scored above zero.
    """
    # One (estimator, grid) spec per candidate; the pipeline wrapping is done
    # once in the loop below instead of being duplicated per model.
    model_specs = {
        'RandomForest': (
            RandomForestClassifier(random_state=42),
            {
                'classifier__n_estimators': [100, 200, 300],
                'classifier__max_depth': [None, 10, 20],
                'classifier__min_samples_split': [2, 5],
                'classifier__min_samples_leaf': [1, 2],
            },
        ),
        'GradientBoosting': (
            GradientBoostingClassifier(random_state=42),
            {
                'classifier__n_estimators': [100, 200, 300],
                'classifier__learning_rate': [0.01, 0.1],
                'classifier__max_depth': [3, 5],
            },
        ),
    }

    best_model = None
    best_accuracy = 0.0
    best_model_name = ""

    for model_name, (estimator, param_grid) in model_specs.items():
        model = Pipeline(steps=[
            ('preprocessor', preprocessor),
            ('classifier', estimator)
        ])
        logger.info(f'开始调优 {model_name} 模型...')
        logger.info(f'{model_name} 参数网格: {param_grid}')

        grid_search = GridSearchCV(model, param_grid, cv=cv, scoring='accuracy', n_jobs=n_jobs)
        grid_search.fit(x_train, y_train)
        logger.info(f'{model_name} 最佳参数: {grid_search.best_params_}')
        logger.info(f'{model_name} 最佳交叉验证准确率: {grid_search.best_score_:.4f}')
        # Keep the refit estimator of the best-scoring candidate seen so far.
        if grid_search.best_score_ > best_accuracy:
            best_accuracy = grid_search.best_score_
            best_model = grid_search.best_estimator_
            best_model_name = model_name
    logger.info(f'最佳模型为{best_model_name},交叉验证最佳准确率为 {best_accuracy * 100:.2f}%')
    return best_model


def evaluate_model(best_model, x_test, y_test, logger):
    """Score the tuned model on the held-out test set and log the metrics.

    Args:
        best_model: Fitted pipeline returned by ``build_and_tune_model``.
        x_test: Test features.
        y_test: Test target.
        logger: Run logger.

    Returns:
        The array of test-set predictions.
    """
    y_pred_best = best_model.predict(x_test)
    accuracy_best = accuracy_score(y_test, y_pred_best)
    print(f'调优后模型准确性：{accuracy_best*100:.2f}%')
    logger.info(f'调优后模型准确性：{accuracy_best*100:.2f}%')
    # Fix: removed the redundant local `from sklearn.metrics import ...` —
    # precision_score/recall_score/f1_score are already imported at module level.
    # NOTE(review): these calls use the default binary average with pos_label=1;
    # if 'Attrition' is string-encoded ('Yes'/'No') they will raise — confirm
    # the label encoding in the data files.
    precision = precision_score(y_test, y_pred_best)
    recall = recall_score(y_test, y_pred_best)
    f1 = f1_score(y_test, y_pred_best)
    logger.info(f'精确率: {precision * 100:.2f}%')
    logger.info(f'召回率: {recall * 100:.2f}%')
    logger.info(f'F1分数: {f1 * 100:.2f}%')
    return y_pred_best


def save_model(best_model, logger, model_path='best_model.pkl'):
    """Persist the best model with joblib.

    Args:
        best_model: Fitted pipeline to serialize.
        logger: Run logger.
        model_path: Output file path.  Defaults to 'best_model.pkl' in the
            current working directory, matching the original behaviour.
    """
    joblib.dump(best_model, model_path)
    logger.info('最佳模型已保存为pkl文件')


# Global matplotlib settings for the plots produced below.
# Fix: the original `plt.rcParams['figure.dpi']` was a bare read with no
# assignment — a no-op (likely a lost `= <value>`); removed as dead code.
plt.rcParams['font.sans-serif'] = ['WenQuanYi Zen Hei']  # CJK-capable font so Chinese labels render
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs renderable with the CJK font


def plot_and_report(y_test, y_pred_best, logger):
    """Plot and save the confusion matrix, then print the classification report.

    Args:
        y_test: True test labels.
        y_pred_best: Predictions from the tuned model.
        logger: Run logger.
    """
    conf_matrix = confusion_matrix(y_test, y_pred_best)
    plt.figure(figsize=(8, 6))
    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues')
    plt.xlabel('预测值')
    plt.xticks(rotation=45)
    plt.ylabel('真实值')
    plt.title('混淆矩阵')
    plot_dir = r'E:\AAI\anzhuangbao\python\PYCharmanzhuang\pythonProject\0816\Brain drain\data'
    os.makedirs(plot_dir, exist_ok=True)
    plot_path = os.path.join(plot_dir, f'confusion_matrix_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}.png')
    # Bug fix: the original computed plot_path (and created plot_dir) but never
    # wrote the figure.  Save BEFORE plt.show(), which may clear the figure.
    plt.savefig(plot_path)
    plt.show()
    print('分类报告：')
    print(classification_report(y_test, y_pred_best))
    logger.info('已绘制混淆矩阵并打印分类报告')



# Script entry point: end-to-end training workflow — logging setup, data
# loading, preprocessing, hyperparameter tuning, test-set evaluation, model
# persistence, and confusion-matrix reporting, in that order.
if __name__ == '__main__':
    logger = setup_logger()
    train_data, test_data = load_data(logger)
    x_train,x_test, y_train,y_test ,preprocessor = preprocess_data(train_data,test_data,logger)
    best_model = build_and_tune_model(x_train,y_train,preprocessor,logger)
    y_pred_best = evaluate_model(best_model,x_test,y_test,logger)
    save_model(best_model,logger)
    plot_and_report(y_test,y_pred_best,logger)