import pandas as pd
from imblearn.over_sampling import SMOTE  # SMOTE处理类别不均衡
from sklearn.feature_selection import SelectKBest,RFE,chi2
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
import warnings
import joblib
import numpy as np
from scipy.stats import mstats

# Silence library warnings and widen pandas display limits for EDA prints.
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)

"""
    特征工程，私有化特征处理过程中的方法，仅对外暴露 getFeatures()
"""
class FeatureExtract:
    """Feature-engineering pipeline; internals are private, only getFeatures() is exposed.

    In 'train' mode every stateful transformer (one-hot encoder, clip
    bounds, feature selectors, scaler) is fitted and persisted under
    ../model/; any other ptype re-loads those artifacts so the test
    split receives exactly the transformations learned on training data.
    """

    def __init__(self, df, ptype):
        """Initialize the pipeline; should be called after train_test_split.

        :param df: raw DataFrame, must contain the 'Attrition' target column
        :param ptype: processing mode, 'train' or 'test'
        """
        self.df = df
        self.ptype = ptype

    def getFeatures(self):
        """Public entry point, designed as a chain so steps can be flexibly
        combined/commented out to compare their effect."""
        return (self.__featuresSelecting()
                .__featureEngineering()
                # .__removeRedundantFeatures()
                # Extreme-value handling goes first so RFE/Chi2 inputs stay stable
                .__extremumProcessing()
                # .__overSampling()
                # Feature-elimination steps below: pick at most ONE;
                # they may also lower the score.
                # .__featureSelectionByMI()
                # .__featureSelectionByRFE()
                # .__featureSelectionByChi2()
                .__preProcessing().df)

    def __featuresSelecting(self):
        """Drop uninformative raw columns and encode the categorical ones."""
        # After inspecting the data: EmployeeNumber is an ID, StandardHours
        # is constant (80) and Over18 is constant ('Y') — no signal in any.
        self.df.drop(['EmployeeNumber', 'StandardHours', 'Over18'], axis=1, inplace=True)

        # Manual encoding for ordinal / binary columns.
        self.df['BusinessTravel'] = self.df['BusinessTravel'].map(
            {'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2})
        self.df['Gender'] = self.df['Gender'].map({'Male': 1, 'Female': 0})
        self.df['OverTime'] = self.df['OverTime'].map({'No': 0, 'Yes': 1})

        # One-hot columns: the fitted encoder must be persisted, otherwise
        # the test split could end up with a different set of column names.
        cat_cols = ['Department', 'EducationField', 'JobRole', 'MaritalStatus']

        if self.ptype == 'train':
            # Training split: fit and save the encoder.
            encoder = OneHotEncoder(sparse_output=False, drop=None, handle_unknown='ignore')
            encoder.fit(self.df[cat_cols])
            joblib.dump(encoder, '../model/OneHotEncoder.pkl')
        else:
            # Test split: reuse the training-time encoder.
            encoder = joblib.load('../model/OneHotEncoder.pkl')

        # handle_unknown='ignore' maps unseen test categories to all-zero rows.
        encoded = encoder.transform(self.df[cat_cols])
        encoded_df = pd.DataFrame(encoded, columns=encoder.get_feature_names_out(cat_cols),
                                  index=self.df.index)
        self.df = pd.concat([self.df.drop(cat_cols, axis=1), encoded_df], axis=1)

        return self

    def __featureEngineering(self):
        """Build composite features that encode business intuition.

        The +1e-8 terms below guard every ratio against division by zero.
        """
        df = self.df
        # 1. Income efficiency
        df['IncomePerLevel'] = df['MonthlyIncome'] / (df['JobLevel'] + 1e-8)
        df['IncomePerYearAtJob'] = df['MonthlyIncome'] / (df['TotalWorkingYears'] + 1e-8)

        # 2. Aggregate satisfaction
        satisfaction_cols = [
            'JobSatisfaction',
            'EnvironmentSatisfaction',
            'WorkLifeBalance',
            'RelationshipSatisfaction'
        ]
        df['AvgSatisfaction'] = df[satisfaction_cols].mean(axis=1)
        # Spread across the four scores; larger = more unbalanced satisfaction
        df['SatisfactionStd'] = df[satisfaction_cols].std(axis=1)

        # 3. Growth & stability
        df['AvgTenurePerCompany'] = df['TotalWorkingYears'] / (df['NumCompaniesWorked'] + 1e-8)
        df['TenureRatio'] = df['YearsAtCompany'] / (df['TotalWorkingYears'] + 1e-8)
        df['TrainingPerYear'] = df['TrainingTimesLastYear'] / (df['YearsAtCompany'] + 1e-8)

        # Promotion speed within the current role
        df['YearsPerLevel'] = df['YearsInCurrentRole'] / (df['JobLevel'] + 1e-8)

        # 4. Manager relationship
        df['ManagerTenureRatio'] = df['YearsWithCurrManager'] / (df['YearsInCurrentRole'] + 1e-8)
        # Cast to int for consistency with the other flag features (was left as bool)
        df['IsManagerChanged'] = (df['YearsWithCurrManager'] < df['YearsInCurrentRole']).astype(int)

        # 5. Commute / travel pressure (BusinessTravel already encoded 0/1/2)
        df['CommutePressure'] = df['DistanceFromHome'] + df['BusinessTravel']  # higher = harder

        # High-commute-risk group: young, single, far from the office
        df['IsHighCommuteRisk'] = ((df['Age'] < 35) &
                                   (df['MaritalStatus_Single'] == 1) &
                                   (df['DistanceFromHome'] > 15)).astype(int)

        # 6. Promotion vs performance: top rating but small raise
        df['UnderpaidHighPerformer'] = ((df['PerformanceRating'] == 4) &
                                        (df['PercentSalaryHike'] < 15)).astype(int)

        # Promotion stagnation index
        df['NoPromoYearsPerLevel'] = df['YearsSinceLastPromotion'] / (df['JobLevel'] + 1e-8)

        # 7. Age-to-experience ratio
        df['AgeToExperienceRatio'] = df['Age'] / (df['TotalWorkingYears'] + 1e-8)

        # 8. Current-role stability
        df['RoleStability'] = df['YearsInCurrentRole'] / (df['YearsAtCompany'] + 1e-8)

        # 9. Salary hike vs the mean of the same job level.
        # NOTE(review): the group mean is computed on the current split, so the
        # test set uses its own statistics — TODO persist the training means.
        df['SalaryHikeRatio'] = df['PercentSalaryHike'] / (
                df.groupby('JobLevel')['PercentSalaryHike'].transform('mean') + 1e-8)

        # 10. Workload score
        df['WorkloadScore'] = df['OverTime'] + df['BusinessTravel'] + (df['DistanceFromHome'] > 15).astype(int)

        # 11. Career stagnation index
        df['CareerStagnation'] = (df['YearsSinceLastPromotion'] + (5 - df['TrainingTimesLastYear'])) / (
                df['TotalWorkingYears'] + 1e-8)

        # 12. Family risk: single, low income, long commute.
        # NOTE(review): quantile(0.3) is also split-local — same caveat as #9.
        df['FamilyRisk'] = ((df['MaritalStatus_Single'] == 1) &
                            (df['MonthlyIncome'] < df['MonthlyIncome'].quantile(0.3)) &
                            (df['DistanceFromHome'] > 20)).astype(int)

        self.df = df
        return self

    def __overSampling(self):
        """Oversample the minority class with SMOTE to fight class imbalance.

        Applied to the training split only; the test split is never resampled.
        """
        if self.ptype == 'train':
            # Split features and label
            X = self.df.drop(['Attrition'], axis=1)
            Y = self.df['Attrition']
            # Deliberately stop short of a full 1:1 balance to avoid
            # overfitting the synthetic minority samples.
            sm = SMOTE(
                sampling_strategy=0.7,
                k_neighbors=3,
                random_state=82
            )
            X_resampled, Y_resampled = sm.fit_resample(X, Y)
            print(X_resampled.shape, " \t ", Y_resampled.shape)
            self.df = pd.concat([X_resampled, Y_resampled], axis=1)
        return self

    def __removeRedundantFeatures(self):
        """Drop features whose pairwise correlation exceeds 0.9 to reduce
        redundancy (curse of dimensionality / overfitting).

        Returns self with the redundant feature columns removed.
        """
        if self.ptype == 'train':
            # Correlate FEATURES only: including 'Attrition' could delete the
            # target itself, or drop a feature merely for tracking the target.
            cor_matrix = self.df.drop('Attrition', axis=1).corr().abs()
            upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
            # Any column correlated > 0.9 with an earlier one is dropped.
            to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.9)]
            # Persist the dropped columns so the test split matches.
            with open('../model/to_drop_columns.txt', 'w') as f:
                for col in to_drop:
                    f.write(col + '\n')
            self.df.drop(to_drop, axis=1, inplace=True)
        else:
            with open('../model/to_drop_columns.txt', 'r') as file:
                to_drop = file.read().splitlines()
            self.df.drop([col for col in to_drop if col in self.df.columns], axis=1, inplace=True)
        return self

    def __featureSelectionByMI(self):
        """Keep the K features with the highest mutual information with the
        target; lowers dimensionality and improves generalization."""
        if self.ptype == 'train':
            from sklearn.feature_selection import mutual_info_classif

            # Compute MI on feature columns only (exclude the target).
            X = self.df.drop('Attrition', axis=1)
            y = self.df['Attrition']

            # ~40+ features at this point; k is tunable for model effect.
            self.selector_mi = SelectKBest(mutual_info_classif, k=30)
            X_selected = self.selector_mi.fit_transform(X, y)

            # Recover the names of the selected columns.
            selected_cols = X.columns[self.selector_mi.get_support()].tolist()
            self.df = pd.concat([pd.DataFrame(X_selected, columns=selected_cols, index=X.index), y], axis=1)

            # Persist the selector for the test split.
            joblib.dump(self.selector_mi, '../model/SelectKBest_MI.pkl')

        else:
            # Test split: reuse the training-time selector.
            self.selector_mi = joblib.load('../model/SelectKBest_MI.pkl')
            X = self.df.drop('Attrition', axis=1)
            y = self.df['Attrition']
            X_selected = self.selector_mi.transform(X)
            selected_cols = X.columns[self.selector_mi.get_support()].tolist()
            self.df = pd.concat([pd.DataFrame(X_selected, columns=selected_cols, index=X.index), y], axis=1)

        return self

    def __extremumProcessing(self):
        """Clip numeric features to their training-split [1%, 99%] band.

        Some models are sensitive to extreme values. Bounds are learned on the
        training split and persisted so the test split is clipped with the
        TRAINING quantiles — the previous per-split winsorize leaked the test
        distribution and made train/test inconsistent.
        """
        num_cols = self.df.drop('Attrition', axis=1).select_dtypes(include=[np.number]).columns
        if self.ptype == 'train':
            bounds = {col: (self.df[col].quantile(0.01), self.df[col].quantile(0.99))
                      for col in num_cols}
            joblib.dump(bounds, '../model/WinsorizeBounds.pkl')
        else:
            bounds = joblib.load('../model/WinsorizeBounds.pkl')
        for col, (lo, hi) in bounds.items():
            # Guard: a bound column may have been dropped from this split.
            if col in self.df.columns:
                self.df[col] = self.df[col].clip(lower=lo, upper=hi)
        return self

    def __featureSelectionByRFE(self):
        """Select the top-K features via Recursive Feature Elimination,
        ranked by LogisticRegression (suits linearly separable tasks)."""
        if self.ptype == 'train':
            X = self.df.drop('Attrition', axis=1)
            y = self.df['Attrition']

            # Logistic regression as the base estimator (currently best model).
            estimator = LogisticRegression(C=1.0, solver='liblinear', random_state=82)

            # RFE keeps the top k features (tunable).
            k = 30
            selector_rfe = RFE(estimator, n_features_to_select=k, step=1)
            X_selected = selector_rfe.fit_transform(X, y)

            # Recover the names of the selected columns.
            selected_cols = X.columns[selector_rfe.support_].tolist()
            self.df = pd.concat([pd.DataFrame(X_selected, columns=selected_cols, index=X.index), y], axis=1)

            # Persist the selector and the column list.
            joblib.dump(selector_rfe, '../model/RFE_Selector.pkl')
            with open('../model/RFE_Selected_Columns.txt', 'w') as f:
                for col in selected_cols:
                    f.write(col + '\n')
        else:
            selector_rfe = joblib.load('../model/RFE_Selector.pkl')
            X = self.df.drop('Attrition', axis=1)
            y = self.df['Attrition']

            X_selected = selector_rfe.transform(X)
            selected_cols = X.columns[selector_rfe.support_].tolist()

            self.df = pd.concat([pd.DataFrame(X_selected, columns=selected_cols, index=X.index), y], axis=1)

        return self

    def __featureSelectionByChi2(self):
        """Select features with the chi-squared test.

        Note: chi2 requires every feature value to be >= 0, so negative
        columns are filtered out first.
        """
        if self.ptype == 'train':
            X = self.df.drop('Attrition', axis=1)
            y = self.df['Attrition']

            non_negative_candidates = X.select_dtypes(include=[np.integer, np.floating]).columns.tolist()

            # Keep only columns whose training minimum is >= 0.
            X_non_negative = X[non_negative_candidates]
            min_vals = X_non_negative.min()
            valid_cols = min_vals[min_vals >= 0].index.tolist()
            X_chi2 = X_non_negative[valid_cols]

            if len(X_chi2.columns) == 0:
                raise ValueError("没有非负特征，无法使用卡方检验")

            # Pick the K best features; k must not exceed the column count.
            k = 30
            k = min(k, len(X_chi2.columns))
            selector_chi2 = SelectKBest(chi2, k=k)
            X_selected = selector_chi2.fit_transform(X_chi2, y)

            # Recover the names of the selected columns.
            selected_cols = X_chi2.columns[selector_chi2.get_support()].tolist()
            self.df = pd.concat([pd.DataFrame(X_selected, columns=selected_cols, index=X_chi2.index), y], axis=1)

            joblib.dump(selector_chi2, '../model/Chi2_Selector.pkl')
            with open('../model/Chi2_Selected_Columns.txt', 'w') as f:
                for col in selected_cols:
                    f.write(col + '\n')

        else:
            # Load the column names selected at training time.
            with open('../model/Chi2_Selected_Columns.txt', 'r') as f:
                selected_cols = [line.strip() for line in f if line.strip()]

            # Best-effort: drop any selected column missing from this split.
            missing_cols = [col for col in selected_cols if col not in self.df.columns]
            if missing_cols:
                print(f"以下列在测试集中缺失：{missing_cols}，将被忽略")
                selected_cols = [col for col in selected_cols if col in self.df.columns]

            X_selected = self.df[selected_cols]
            y = self.df['Attrition']
            self.df = pd.concat([X_selected, y], axis=1)

        return self

    def __preProcessing(self):
        """Standardize the features to remove scale differences."""
        # Separate features and label
        X = self.df.drop('Attrition', axis=1)
        y = self.df['Attrition']

        if self.ptype == 'train':
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)
            joblib.dump(scaler, '../model/StandardScaler.pkl')
        else:
            scaler = joblib.load('../model/StandardScaler.pkl')
            X_scaled = scaler.transform(X)

        # Back to a DataFrame with the original column names and index.
        X_scaled_df = pd.DataFrame(X_scaled, columns=X.columns, index=X.index)

        # Re-attach the label.
        self.df = pd.concat([X_scaled_df, y], axis=1)

        return self


