import os
import sys
from tabnanny import verbose  # NOTE(review): `verbose` is never used — almost certainly an accidental IDE auto-import; safe to remove
import warnings

from lark import logger  # NOTE(review): unused and shadowed by local `logger` variables below — likely an accidental auto-import

# Add the load_predict_project parent directory to Python's module search path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# Suppress all warnings (deliberate: silences sklearn/seaborn noise during training)
warnings.filterwarnings('ignore')

import io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from utils.log import Logger
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_curve, roc_auc_score, classification_report
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from skopt import gp_minimize
import joblib
import xgboost as xgb
import lightgbm as lgb

# Plot theme and Chinese font configuration
sns.set_theme()
plt.rcParams['font.family'] = 'SimHei'  # SimHei so Chinese axis labels/titles render
plt.rcParams['font.size'] = 16
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly under a CJK font


# Pandas option: show all columns when printing DataFrames
pd.set_option('display.max_columns', None)

#  ====================== 分类类型特征的绘制 ======================
def level_plot(data, column, level=True):
    
    """Plot the attrition breakdown for one categorical ("level") column.

    Draws overlaid bars (total headcount vs. number of leavers) per category
    and an attrition-rate line on a secondary y-axis, then saves the figure
    to ../data/fig/ and shows it.

    NOTE: mutates ``data`` in place — the column is cast to object dtype.

    Args:
        data (pd.DataFrame): data; must contain numeric 'Attrition' (0/1)
            and an 'EmployeeNumber' column (used only as a row counter).
        column (str): the level column to analyze.
        level (bool, optional): True if ``column`` is a native categorical
            feature; False if it is a derived '<name>_level' bin column
            (the '_level' suffix is stripped before the title lookup).
    """
    # English meanings: Chinese display names for each dataset column,
    # used for figure titles and file names (runtime strings — left as-is).
    column_cn_dict = {
    'Attrition'               :'离职情况',
    'Age'                     :'年龄',
    'BusinessTravel'          :'商务旅行情况',
    'Department'              :'部门',
    'DistanceFromHome'        :'家与工作地点的距离',
    'Education'               :'教育程度',
    'EducationField'          :'教育领域',
    'EmployeeNumber'          :'员工编号',
    'EnvironmentSatisfaction' :'工作环境满意度',
    'Gender'                  :'性别',
    'JobInvolvement'          :'工作投入度',
    'JobLevel'                :'工作级别',
    'JobRole'                 :'工作角色',
    'JobSatisfaction'         :'工作满意度',
    'MaritalStatus'           :'婚姻状况',
    'MonthlyIncome'           :'月收入',
    'NumCompaniesWorked'      :'曾工作过的公司数量',
    'Over18'                  :'是否年满18 岁',
    'OverTime'                :'是否加班',
    'PercentSalaryHike'       :'薪资涨幅百分比',
    'PerformanceRating'       :'绩效评级',
    'RelationshipSatisfaction':'人际关系满意度',
    'StandardHours'           :'标准工作时长',
    'StockOptionLevel'        :'股票期权水平',
    'TotalWorkingYears'       :'总工作年限',
    'TrainingTimesLastYear'   :'去年参加培训次数',
    'WorkLifeBalance'         :'工作与生活平衡度',
    'YearsAtCompany'          :'在公司工作年限 ',
    'YearsInCurrentRole'      :'在当前岗位工作年限',
    'YearsSinceLastPromotion' :'自上次晋升以来的年',
    'YearsWithCurrManager'    :'与当前经理共事年限'
}

    # Cast to object dtype so seaborn treats the values as discrete categories
    data[column] = data[column].astype('object')
    plt.figure(figsize=(15, 6))
    
    # Attrition rate per category: leavers / headcount
    # (Attrition sums the 0/1 flags; EmployeeNumber just counts rows)
    col_data = data.groupby([column], as_index=False).agg({'Attrition': 'sum', 'EmployeeNumber': 'count'})
    col_data['re_rate'] = col_data['Attrition'] / col_data['EmployeeNumber']
    
    # Overlaid bars: total headcount, then leavers drawn on top
    p = sns.barplot(col_data, x=column, y='EmployeeNumber', label='总人数')
    p = sns.barplot(col_data, x=column, y='Attrition', label='离职人数')
    
    # Attrition-rate line on a twin y-axis, formatted as a percentage
    ax2 = p.twinx()
    p.legend(loc='upper left')
    ax2.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{x:.1%}'))
    p = sns.lineplot(col_data, x=p.get_xticks(), y='re_rate', c='r', label='离职率', ax=ax2)
    
    ax2.legend(loc='upper right')
    ax2.grid(False)
    ax2.set_ylim(0, 0.45)
    ax2.set_ylabel('离职率')
    
    # Derived bin columns are named '<col>_level'; strip the 6-char
    # suffix so the Chinese-name lookup uses the base column name.
    if level == False:
        column = column[:-6]
    
    p.set_title('不同' + column_cn_dict[column] + '的离职情况')
    plt.savefig(f'../data/fig/不同{column_cn_dict[column]}的离职情况.png')
    plt.show()


#  ====================== int类型特征的绘制 ======================
def int_plot(data, column, bins, labels):
    """Bin a numeric column into labelled levels and plot its attrition profile.

    Adds a new '<column>_level' categorical column to ``data`` (in place)
    via ``pd.cut`` and delegates the plotting to :func:`level_plot`.

    Args:
        data (pd.DataFrame): source data (mutated in place).
        column (str): name of the numeric column to bin.
        bins (list): bin edges passed to ``pd.cut``.
        labels (list): labels for the resulting bins.
    """
    level_col = f'{column}_level'
    data[level_col] = pd.cut(data[column], bins, labels=labels)
    level_plot(data, level_col, level=False)


#  ====================== 数据分析及图形绘制 ======================
def ana_data(path):
    """Run exploratory analysis on the HR dataset and save every figure.

    Plots one bar+line attrition figure per categorical feature, one per
    binned numeric feature, and a Spearman-correlation heatmap of the
    one-hot-encoded data.

    Args:
        path (str): path to the training CSV file.
    """
    logger = Logger('../', 'data_analyze').get_logger()
    data = pd.read_csv(path)

    # Buffer kept for ad-hoc data.info() dumps into the log during review.
    buffer = io.StringIO()

    # Known constant columns (no predictive value): Over18 is always 'Y',
    # StandardHours is always 80.

    # Categorical / ordinal features: one figure each.
    obj_column_list = ['BusinessTravel', 'Department', 'EducationField',
                       'JobRole', 'MaritalStatus', 'OverTime', 'Gender', 
                       'Education', 'EnvironmentSatisfaction',
                       'JobInvolvement', 'JobLevel', 'JobSatisfaction',
                       'PerformanceRating', 'RelationshipSatisfaction',
                       'StockOptionLevel', 'WorkLifeBalance']
    for cat_col in obj_column_list:
        level_plot(data, cat_col)

    # Numeric features: bin edges and Chinese bin labels chosen from the
    # distributions observed in describe().
    int_column_dic = {
        'Age': {
            'bins': [17, 30, 36, 43, 70],
            'labels': ['青年', '青壮年', '壮年', '中老年']
        },
        'DistanceFromHome': {
            'bins': [-1, 5, 15, 35],
            'labels': ['近', '中', '远']
        },
        'EmployeeNumber': {
            'bins': [-1, 500, 1500, 2500],
            'labels': ['小', '中', '大']
        },
        'MonthlyIncome': {
            'bins': [1000, 3000, 8000, 25000],
            'labels': ['低', '中', '高']
        },
        'NumCompaniesWorked': {
            'bins': [-1, 3, 6, 10],
            'labels': ['少', '中', '多']
        },
        'PercentSalaryHike': {
            'bins': [6, 12, 18, 30],
            'labels': ['低', '中', '高']
        },
        'TotalWorkingYears': {
            'bins': [-1, 6, 15, 45],
            'labels': ['短', '中', '长']
        },
        'TrainingTimesLastYear': {
            'bins': [-1, 2, 4, 8],
            'labels': ['少', '中', '多']
        },
        'YearsAtCompany': {
            'bins': [-1, 5, 10, 45],
            'labels': ['短', '中', '长']
        },
        'YearsInCurrentRole': {
            'bins': [-1, 3, 7, 20],
            'labels': ['短', '中', '长']
        },
        'YearsSinceLastPromotion': {
            'bins': [-1, 2, 4, 20],
            'labels': ['短', '中', '长']
        },
        'YearsWithCurrManager': {
            'bins': [-1, 3, 7, 20],
            'labels': ['短', '中', '长']
        },
    }
    for num_col, cfg in int_column_dic.items():
        int_plot(data, num_col, cfg['bins'], cfg['labels'])

    # Spearman-correlation heatmap over the one-hot-encoded data.
    plt.figure(figsize=(50, 48))
    onehot_data = pd.get_dummies(data, drop_first=True)
    corr_matrix = onehot_data.corr(method='spearman')
    corr_p = sns.heatmap(corr_matrix, annot=True, fmt='.2f', cmap='coolwarm')
    corr_p.set_title('斯皮尔曼系数热力图')
    plt.savefig('../data/fig/斯皮尔曼系数热力图.png')


#  ====================== 特征工程 ======================
def feature_engineering(path):
    """Feature engineering: load a CSV, drop uninformative columns, one-hot encode.

    Args:
        path (str): path to the CSV file.

    Returns:
        dict: {'data': feature DataFrame (without 'Attrition'),
               'target': 'Attrition' Series,
               'columns': Index of feature column names}.
    """
    df = pd.read_csv(path)

    # Columns judged uninformative during EDA (constant columns such as
    # Over18/StandardHours, plus features with no useful signal).
    drop_col = ['NumCompaniesWorked', 'PerformanceRating', 'RelationshipSatisfaction',
                'PercentSalaryHike', 'Gender', 'EmployeeNumber', 'Over18', 'StandardHours']
    df = df.drop(columns=drop_col)
    df = pd.get_dummies(df, drop_first=True)

    data = df.drop(columns=['Attrition'])
    target = df['Attrition']

    return {
        'data': data,
        'target': target,
        # Reuse the already-computed frame instead of dropping a second time.
        'columns': data.columns,
    }


#  ====================== 划分数据集并超采样 ======================
def data_split(data):
    """Split into train/test sets and oversample the training set with SMOTE.

    Args:
        data (dict): dataset dict from feature_engineering with keys
            'data' (features) and 'target' (labels).

    Returns:
        tuple: (X_train, X_test, y_train, y_test, state), where state is
            the random seed used for the split.
    """
    X = data['data']
    y = data['target']

    # Fixed seed chosen from earlier experiments (candidates tried:
    # 2513, 3218, 3943, 8719). The previous code also drew
    # np.random.randint(1, 10000) and immediately discarded it — that
    # dead (and RNG-state-consuming) call has been removed.
    state = 8719

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=state)

    # Balance classes on the training split only — never on the test split.
    smote = SMOTE(random_state=3222)
    X_train, y_train = smote.fit_resample(X_train, y_train)

    return X_train, X_test, y_train, y_test, state


#  ====================== 贝叶斯优化目标函数 ======================
def objective(params, data):
    """Bayesian-optimization objective: 1 - AUC of an LGBM fit with ``params``.

    Args:
        params: sequence of (n_estimators, learning_rate, max_depth,
            num_leaves, subsample), in the order used by the search space.
        data: dataset dict consumed by data_split.

    Returns:
        float: 1 - ROC AUC on the held-out split (skopt minimizes, so
            smaller is better).
    """
    n_estimators, learning_rate, max_depth, num_leaves, subsample = params
    X_train, X_test, y_train, y_test, _ = data_split(data)

    candidate = lgb.LGBMClassifier(
        n_estimators=n_estimators,
        learning_rate=learning_rate,
        max_depth=max_depth,
        num_leaves=num_leaves,
        subsample=subsample,
        verbose=-1,
    )
    candidate.fit(X_train, y_train)

    proba = candidate.predict_proba(X_test)[:, 1]
    # Return 1 - AUC because skopt minimizes its objective.
    return 1 - roc_auc_score(y_test, proba)
    
    
#  ====================== 贝叶斯超参数空间搜索 ======================
def bayes_search(data):
    """Tune LGBM hyper-parameters with Gaussian-process optimization.

    Runs 50 evaluations of `objective` over the search space below and
    logs the best parameter vector found.

    Args:
        data: dataset dict consumed by objective/data_split.
    """
    # (low, high) bounds per tuned hyper-parameter, in the order
    # `objective` unpacks them.
    params_space = [
        (100, 500),    # n_estimators
        (0.01, 0.2),   # learning_rate
        (3, 15),       # max_depth
        (20, 50),      # num_leaves
        (0.5, 1.0),    # subsample
    ]

    result = gp_minimize(
        lambda p: objective(p, data),  # objective to minimize
        params_space,                  # search space
        n_calls=50,                    # number of evaluations
        random_state=42,               # reproducible search
    )

    best_logger = Logger('../', 'best_params').get_logger()
    best_logger.info(result.x)


#  ====================== 模型训练 ======================
def model_train(data, algorithm, bayes_search_on=False):
    """模型训练

    Args:
        data (_type_): 数据
        algorithm (str, optional): 模型算法. Defaults to 'lgb'.

    Returns:
        _type_: 随机种子, roc_auc值
    """
    X_train, X_test, y_train, y_test, state = data_split(data)

    if bayes_search_on:
        bayes_search(data)

    match algorithm:
        case 'xgb':
            xgb_params = {
                        'learning_rate': 0.08, 
                        'max_depth': 8, 
                        'n_estimators': 150
                        }
            model = xgb.XGBClassifier(**xgb_params)
        case 'lgb':
            lgb_param = {
                        'n_estimators': 250, 
                        'learning_rate': 0.1, 
                        'max_depth': 7, 
                        'num_leaves': 20, 
                        'subsample': 0.6,
                        }
            model = lgb.LGBMClassifier(**lgb_param, verbose=-1)
        case 'rf':
            rf_params = {
                        'n_estimators': 200,
                        'max_depth': 8
                        }
            model = RandomForestClassifier(**rf_params)

    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    print(classification_report(y_test, y_pred))
    
    y_proba = model.predict_proba(X_test)[:, 1]
    roc = roc_auc_score(y_test, y_proba)
    print('train_roc_auc:', roc)
    
    joblib.dump(model, f'../model/{algorithm}_model.pkl')
    
    print(state)
    
    return state, roc, y_proba
    
    
def model_predict(data, algorithm):
    """Load a persisted model and evaluate it on the given dataset.

    Args:
        data: dataset dict with 'data' (features) and 'target' (labels).
        algorithm: model name used in the saved pickle's filename.

    Returns:
        tuple: (ROC-AUC, positive-class probabilities).
    """
    features = data['data']
    labels = data['target']

    model = joblib.load(f'../model/{algorithm}_model.pkl')

    print(classification_report(labels, model.predict(features)))

    proba = model.predict_proba(features)[:, 1]
    auc = roc_auc_score(labels, proba)
    print('test_roc_auc:', auc)

    return auc, proba


def ensemble_model(data):
    """Blend the three saved models' probabilities and report the ensemble AUC.

    Weighted soft-voting: LightGBM dominates (0.9) with small XGBoost and
    random-forest contributions (0.05 each).

    Args:
        data (dict): dataset dict with 'data' and 'target'.

    Returns:
        float: ROC-AUC of the blended probabilities.
    """
    y = data['target']

    _, lgb_proba = model_predict(data, 'lgb')
    _, xgb_proba = model_predict(data, 'xgb')
    _, rf_proba = model_predict(data, 'rf')

    w1, w2, w3 = 0.9, 0.05, 0.05
    final_proba = w1 * lgb_proba + w2 * xgb_proba + w3 * rf_proba
    # The previous hard 0.5-threshold class predictions (np.where) were
    # computed but never used anywhere — dead code, removed.

    roc = roc_auc_score(y, final_proba)
    print('ensemble_test_roc_auc:', roc)

    return roc


if __name__ == '__main__':
    # One-off exploratory analysis (figures already generated):
    # ana_data('../data/train.csv')
    
    # NOTE(review): `algorithms` is unused while the training loop below
    # stays commented out.
    algorithms = ['lgb', 'xgb', 'rf']
    data = feature_engineering('../data/train.csv')
    test_data = feature_engineering('../data/test.csv')

# Seed-search loop used during experimentation — retrain each model and
# keep going until the ensemble test AUC passes the threshold:
# while True:
    # for algorithm in algorithms:
    #     test_roc = model_predict(test_data, algorithm)
    #     state, train_roc, _ = model_train(data, algorithm, bayes_search_on=False)
    
    # Evaluate the saved weighted ensemble on the held-out test set.
    roc = ensemble_model(test_data)
    # if roc >=0.88:
    #     break

    # logger = Logger('../', f'{algorithm}_random_state').get_logger()
    # logger.info(f'state:{state}, train_roc:{train_roc}, test_roc:{test_roc}')
    