# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import datetime
from utils.log import Logger
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, precision_score, recall_score, f1_score, \
    roc_auc_score, classification_report, confusion_matrix
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from imblearn.over_sampling import SMOTE
import joblib
import warnings

# Silence library warnings (sklearn/xgboost emit many during grid search).
warnings.filterwarnings('ignore')

# Use the SimHei font so the Chinese labels in the figures render correctly.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15


def analyze_data(data, logger):
    """Run exploratory analysis on the raw HR dataset and save figures.

    Logs basic shape/dtype information and the attrition class balance,
    then writes two figures under ../data/fig/: a 2x2 overview panel
    (attrition pie chart + three boxplots) and a correlation heatmap of
    the numeric columns.
    """
    logger.info('=========开始数据分析==============')

    # Basic structure of the dataset.
    logger.info("数据基本信息：")
    logger.info(f"数据形状: {data.shape}")
    logger.info(f"数据类型:\n{data.dtypes}")

    # Class balance of the target variable.
    label_counts = data['Attrition'].value_counts()
    logger.info(f"离职分布:\n{label_counts}")

    # 2x2 overview panel.
    plt.figure(figsize=(15, 10))

    # Panel 1: attrition distribution as a pie chart.
    plt.subplot(2, 2, 1)
    plt.pie(label_counts, labels=['在职', '离职'], autopct='%1.1f%%', colors=sns.color_palette('pastel'))
    plt.title('员工离职分布')

    # Panels 2-4: numeric features split by attrition status.
    boxplot_specs = [
        (2, "Age", '年龄与离职关系'),
        (3, "MonthlyIncome", '月收入与离职关系'),
        (4, "TotalWorkingYears", '工作年限与离职关系'),
    ]
    for position, column, chart_title in boxplot_specs:
        plt.subplot(2, 2, position)
        sns.boxplot(x="Attrition", y=column, data=data)
        plt.title(chart_title)

    plt.tight_layout()
    plt.savefig('../data/fig/数据分析.png')
    plt.close()

    # Pairwise Pearson correlations over numeric columns only.
    numeric_columns = data.select_dtypes(include=['float64', 'int64']).columns
    corr_matrix = data[numeric_columns].corr()
    plt.figure(figsize=(12, 8))
    sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', center=0)
    plt.title('特征相关性热力图')
    plt.savefig('../data/fig/相关性分析.png')
    plt.close()


def feature_engineering(data, logger):
    """
    增强的特征工程
    """
    logger.info('=========开始特征工程==============')
    result = data.copy()

    # 1. 删除无用特征
    columns_to_drop = ['EmployeeNumber', 'Over18', 'StandardHours']
    result = result.drop(columns=columns_to_drop, axis=1)

    # 2. 将目标变量转换为数值型标签
    logger.info('转换目标变量为数值型标签...')
    le = LabelEncoder()
    result['Attrition'] = le.fit_transform(result['Attrition'])
    joblib.dump(le, '../model/label_encoder.pkl')

    # 3. 添加交互特征
    logger.info('添加交互特征...')
    result['工作年限与年龄比'] = result['TotalWorkingYears'] / result['Age']
    result['晋升等待时间'] = result['YearsAtCompany'] - result['YearsSinceLastPromotion']
    result['薪资增长率'] = result['PercentSalaryHike'] / (result['YearsAtCompany'] + 1)
    result['工作满意度与绩效比'] = result['JobSatisfaction'] / result['PerformanceRating']
    result['工作投入度与满意度比'] = result['JobInvolvement'] / result['JobSatisfaction']
    result['工作生活平衡与满意度比'] = result['WorkLifeBalance'] / result['JobSatisfaction']

    # 4. 分箱处理
    logger.info('对连续变量进行分箱处理...')
    # 距离分组
    cut_labels = ['Near', 'Reasonable', 'Far']
    cut_bins = [-1, result['DistanceFromHome'].quantile(0.33),
                result['DistanceFromHome'].quantile(0.67),
                result['DistanceFromHome'].max() + 1]
    result['DistanceGroup'] = pd.cut(result['DistanceFromHome'], bins=cut_bins, labels=cut_labels)

    # 其他分箱
    result['年龄分组'] = pd.qcut(result['Age'], q=5, labels=['20-30', '30-40', '40-50', '50-60', '60+'])
    result['收入分组'] = pd.qcut(result['MonthlyIncome'], q=5, labels=['低', '中低', '中', '中高', '高'])
    result['工作年限分组'] = pd.qcut(result['TotalWorkingYears'], q=5,
                                     labels=['0-5年', '5-10年', '10-15年', '15-20年', '20年以上'])

    # 5. 处理分类变量
    logger.info('处理分类变量...')
    nominal_features = ['BusinessTravel', 'Department', 'EducationField',
                        'Gender', 'JobRole', 'MaritalStatus', 'OverTime',
                        'DistanceGroup', '年龄分组', '收入分组', '工作年限分组']

    # 保存类别特征名称
    joblib.dump(nominal_features, '../model/nominal_features.pkl')

    # 使用OneHotEncoder进行编码
    encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
    encoded = encoder.fit_transform(result[nominal_features])
    encoded_df = pd.DataFrame(encoded, columns=encoder.get_feature_names_out(nominal_features), index=result.index)

    # 删除原始分类列
    result = result.drop(nominal_features, axis=1)

    # 合并编码后的特征
    result = pd.concat([result, encoded_df], axis=1)

    # 保存编码器
    joblib.dump(encoder, '../model/onehot_encoder.pkl')

    # 6. 处理偏态分布
    logger.info('处理偏态分布...')
    numeric_cols = result.select_dtypes(include=['float64', 'int64']).columns
    skewed_features = []
    for col in numeric_cols:
        if col != 'Attrition':  # 跳过目标变量
            skewness = result[col].skew()
            if abs(skewness) > 0.5:
                result[col] = np.log1p(result[col])
                skewed_features.append(col)
    logger.info(f"进行对数变换的特征: {skewed_features}")
    # 7. 标准化数值特征
    logger.info('标准化数值特征...')
    scaler = StandardScaler()
    numeric_cols = result.select_dtypes(include=['float64', 'int64']).columns
    numeric_cols = [col for col in numeric_cols if col != 'Attrition']  # 排除目标变量
    result[numeric_cols] = scaler.fit_transform(result[numeric_cols])
    joblib.dump(scaler, '../model/scaler.pkl')

    # 8. 保存所有特征名称
    feature_names = result.columns.tolist()
    joblib.dump(feature_names, '../model/feature_names.pkl')

    return result


def grid_search_model(model, param_grid, X_train, y_train, logger, model_name):
    """Tune *model* over *param_grid* and return the best refit estimator.

    Uses stratified 3-fold cross-validation (shuffled, fixed seed) with
    ROC-AUC as the selection metric; logs the winning parameters and
    their cross-validated score.
    """
    logger.info(f'开始对{model_name}进行网格搜索...')
    # 3 folds keeps the search fast while preserving class ratios per fold.
    cv_splitter = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)
    searcher = GridSearchCV(
        estimator=model,
        param_grid=param_grid,
        cv=cv_splitter,
        scoring='roc_auc',
        n_jobs=-1,
        verbose=1,
    )
    searcher.fit(X_train, y_train)

    logger.info(f'{model_name}最优参数: {searcher.best_params_}')
    logger.info(f'{model_name}最优分数: {searcher.best_score_:.4f}')

    return searcher.best_estimator_


def train_models(X_train, y_train, X_test, y_test, logger):
    """Train, tune and evaluate several classifiers on the attrition data.

    Pipeline: SMOTE-balance the training split, grid-search six base
    classifiers, build a soft-voting ensemble from the three models with
    the highest test-set AUC, then fit and evaluate every model. Each
    fitted model is persisted under ../model/, confusion-matrix (and, for
    the tree ensembles, feature-importance) figures are saved under
    ../data/fig/, and the metric table is written to CSV.

    Returns a DataFrame with one row of metrics per model.
    """
    logger.info('=========开始模型训练==============')

    # 1. Oversample the minority class on the training split only.
    smote = SMOTE(random_state=42)
    X_train_balanced, y_train_balanced = smote.fit_resample(X_train, y_train)

    # 2. Base classifiers to be tuned.
    base_models = {
        'XGBoost': xgb.XGBClassifier(random_state=42),
        'Random Forest': RandomForestClassifier(random_state=42),
        'Gradient Boosting': GradientBoostingClassifier(random_state=42),
        'Logistic Regression': LogisticRegression(random_state=42),
        'Decision Tree': DecisionTreeClassifier(random_state=42),
        'KNN': KNeighborsClassifier()
    }

    # 3. Parameter grids (deliberately small to bound the search time).
    param_grids = {
        'XGBoost': {
            'max_depth': [4, 6],
            'learning_rate': [0.05, 0.1],
            'n_estimators': [200, 300],
            'min_child_weight': [1, 2],
            'subsample': [0.8, 1.0],
            'colsample_bytree': [0.8, 1.0]
        },
        'Random Forest': {
            'n_estimators': [200, 300],
            'max_depth': [8, 10],
            'min_samples_split': [2, 5],
            'min_samples_leaf': [1, 2],
            'max_features': ['sqrt']
        },
        'Gradient Boosting': {
            'n_estimators': [200, 300],
            'learning_rate': [0.05, 0.1],
            'max_depth': [4, 6],
            'min_samples_split': [2, 5],
            'subsample': [0.8, 1.0]
        },
        'Logistic Regression': {
            'C': [0.1, 1.0],
            'class_weight': ['balanced'],
            'max_iter': [1000]
        },
        'Decision Tree': {
            'criterion': ['gini'],
            'max_depth': [10, 13],
            'min_samples_split': [2, 4],
            'min_samples_leaf': [1, 2]
        },
        'KNN': {
            'n_neighbors': [3, 5],
            'weights': ['distance'],
            'metric': ['minkowski']
        }
    }

    # 4. Grid-search each base model on the balanced training data.
    best_models = {}
    for name, model in base_models.items():
        best_model = grid_search_model(model, param_grids[name], X_train_balanced, y_train_balanced, logger, name)
        best_models[name] = best_model

    # 5. Build a soft-voting ensemble from the three best models.
    # NOTE(review): the selection below scores the candidates on X_test,
    # which is also used for the final evaluation — this leaks test
    # information into model selection; a separate validation split would
    # be cleaner. Confirm whether this is acceptable here.
    model_scores = {}
    for name, model in best_models.items():
        y_pred_proba = model.predict_proba(X_test)[:, 1]
        auc = roc_auc_score(y_test, y_pred_proba)
        model_scores[name] = auc

    # Pick the three highest-AUC models.
    top_models = dict(sorted(model_scores.items(), key=lambda x: x[1], reverse=True)[:3])
    logger.info(f"选择表现最好的三个模型: {list(top_models.keys())}")

    voting_clf = VotingClassifier(
        estimators=[(name, best_models[name]) for name in top_models.keys()],
        voting='soft',
        weights=[1, 1, 1]  # equal weights; could be tuned per model performance
    )

    # 6. Fit and evaluate every model (bases + voting ensemble).
    results = pd.DataFrame(columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score', 'AUC'])

    # Include the voting classifier in the evaluation loop.
    all_models = {**best_models, 'Voting Classifier': voting_clf}

    for name, model in all_models.items():
        logger.info(f'训练模型: {name}')

        # Refit on the balanced training data (re-fitting the already
        # tuned base estimators; required for the voting ensemble).
        model.fit(X_train_balanced, y_train_balanced)

        # Hard predictions and positive-class probabilities.
        y_pred = model.predict(X_test)
        y_pred_proba = model.predict_proba(X_test)[:, 1]

        # Test-set metrics.
        accuracy = (y_pred == y_test).mean()
        precision = precision_score(y_test, y_pred)
        recall = recall_score(y_test, y_pred)
        f1 = f1_score(y_test, y_pred)
        auc = roc_auc_score(y_test, y_pred_proba)

        # Append one metrics row for this model.
        results = pd.concat([results, pd.DataFrame({
            'Model': [name],
            'Accuracy': [accuracy],
            'Precision': [precision],
            'Recall': [recall],
            'F1 Score': [f1],
            'AUC': [auc]
        })], ignore_index=True)

        # Persist the fitted model, e.g. ../model/random_forest.pkl.
        joblib.dump(model, f'../model/{name.lower().replace(" ", "_")}.pkl')

        # Detailed per-model report in the log.
        logger.info(f"\n{name} 模型评估报告:")
        logger.info(f"准确率: {accuracy:.4f}")
        logger.info(f"精确率: {precision:.4f}")
        logger.info(f"召回率: {recall:.4f}")
        logger.info(f"F1分数: {f1:.4f}")
        logger.info(f"AUC: {auc:.4f}")
        logger.info("\n分类报告:")
        logger.info(classification_report(y_test, y_pred))

        # Confusion-matrix heatmap.
        plt.figure(figsize=(8, 6))
        cm = confusion_matrix(y_test, y_pred)
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
        plt.title(f'{name} 混淆矩阵')
        plt.ylabel('真实标签')
        plt.xlabel('预测标签')
        plt.savefig(f'../data/fig/{name}_confusion_matrix.png')
        plt.close()

        # Feature-importance chart for the tree-based ensembles.
        if name in ['XGBoost', 'Random Forest', 'Gradient Boosting']:
            plt.figure(figsize=(12, 6))
            importance = model.feature_importances_

            # Keep only the 20 most important features.
            indices = np.argsort(importance)[-20:]
            plt.barh(range(len(indices)), importance[indices])
            plt.yticks(range(len(indices)), [X_train.columns[i] for i in indices])
            plt.title(f'{name} 特征重要性')
            plt.tight_layout()
            plt.savefig(f'../data/fig/{name}_feature_importance.png')
            plt.close()

    # 7. Report the model with the highest test AUC.
    best_model = results.loc[results['AUC'].idxmax()]
    logger.info(f"\n最佳模型: {best_model['Model']}")
    logger.info(f"最佳AUC: {best_model['AUC']:.4f}")

    # 8. Persist the full metric table.
    results.to_csv('../data/model_evaluation_results.csv', index=False)

    return results


class AttritionModel(object):
    """Bundles the raw training data with a timestamped run logger."""

    def __init__(self, filename):
        """Create the run logger and load the CSV at *filename*."""
        # Log file is named after the training start time,
        # e.g. train_20240101120000.
        run_stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', "train_" + run_stamp).get_logger()
        # Raw data source for the whole pipeline.
        self.data_source = pd.read_csv(filename)


if __name__ == '__main__':
    # Training pipeline:
    #   1. load data          2. exploratory analysis
    #   3. feature engineering 4. train/test split
    #   5. model training & evaluation
    train_path = os.path.join('../data', 'train.csv')
    trainer = AttritionModel(train_path)

    # Exploratory analysis on the raw data (saves figures).
    analyze_data(trainer.data_source, trainer.logfile)

    # Build the model-ready feature matrix.
    engineered = feature_engineering(trainer.data_source, trainer.logfile)

    # Stratified 80/20 split to preserve the attrition class ratio.
    features = engineered.drop('Attrition', axis=1)
    target = engineered['Attrition']
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42, stratify=target)

    # Train, tune and evaluate all models.
    results = train_models(X_train, y_train, X_test, y_test, trainer.logfile)