import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, f_classif
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import joblib
import os

# Configure fonts so CJK (Chinese) labels render correctly in matplotlib,
# and keep the minus sign displayable with non-ASCII font families.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

class DataProcessor:
    """Unified data processor for the employee-attrition dataset.

    Pipeline: load CSVs -> engineered combination features -> train/test
    split -> scale + one-hot encode -> optional univariate feature
    selection. Fitted transformers can be persisted with joblib and
    reloaded for inference.
    """

    def __init__(self, config=None):
        """Create a processor.

        Parameters
        ----------
        config : dict, optional
            Processing configuration; falls back to
            ``_get_default_config()`` when omitted.
        """
        self.config = config or self._get_default_config()
        self.preprocessor = None        # fitted ColumnTransformer (set in preprocess_data)
        self.feature_selector = None    # fitted SelectKBest, only when method == 'kbest'
        self.feature_importance = None  # DataFrame of scores, only when method == 'kbest'

    def _get_default_config(self):
        """Return the default configuration dictionary."""
        return {
            # Identifier / constant columns with no predictive value.
            'drop_columns': ['EmployeeNumber', 'Over18', 'StandardHours'],
            'target_column': 'Attrition',
            'test_size': 0.2,
            'random_state': 1234,
            'feature_selection': {
                'method': 'manual',  # manual feature list; set to 'kbest' for SelectKBest
                'manual_features': [
                    "BusinessTravel", "Department", "EducationField", 
                    "EnvironmentSatisfaction", "JobRole", "MaritalStatus", "OverTime",
                    "Age", "DistanceFromHome", "Education", "Gender", "JobInvolvement",
                    "JobLevel", "JobSatisfaction", "MonthlyIncome", "NumCompaniesWorked",
                    "PercentSalaryHike", "PerformanceRating", "RelationshipSatisfaction",
                    "StockOptionLevel", "TotalWorkingYears", "TrainingTimesLastYear",
                    "WorkLifeBalance", "YearsAtCompany", "YearsInCurrentRole",
                    "YearsSinceLastPromotion", "YearsWithCurrManager"
                ]
            }
        }

    def create_features(self, data):
        """Feature engineering: derive higher-order combination features.

        Returns a copy of ``data`` with the new columns added; the
        caller's frame is never mutated.
        """
        data = data.copy()  # never mutate the input frame

        # 1. Stability: share of company tenure spent under the current manager.
        data['ManagerStability'] = data['YearsWithCurrManager'] / (data['YearsAtCompany'] + 1)

        # 2. Career development: job level relative to total working years.
        data['CareerGrowthRate'] = data['JobLevel'] / (data['TotalWorkingYears'] + 1)

        # 3. Work-pressure indicators.
        data['OvertimeImpact'] = np.where(data['OverTime'] == 'Yes', data['JobLevel'], 0)
        data['CommutePressure'] = data['DistanceFromHome'] * (data['OverTime'] == 'Yes').astype(int)
        data['WorkloadIndex'] = (data['TrainingTimesLastYear'] + 1) / (data['WorkLifeBalance'] + 1)

        # 4. Income z-score within each JobRole.
        #    transform() preserves the original index, so alignment is safe
        #    for either a training or a test frame.
        if 'JobRole' in data.columns:
            data['Income_zscore_by_JobRole'] = data.groupby('JobRole')['MonthlyIncome'].transform(
                # +1e-6 avoids division by zero for single-member roles
                lambda x: (x - x.mean()) / (x.std() + 1e-6)
            )

        # 5. Income relative to the median income of the same job level.
        level_income = data.groupby('JobLevel')['MonthlyIncome'].median().to_dict()
        data['IncomePerLevel'] = data['MonthlyIncome'] / data['JobLevel'].map(level_income)

        # 6. Training investment: trainings weighted by job level (proxy for
        #    the company's training spend per seniority tier).
        data['TrainingInvestment'] = data['TrainingTimesLastYear'] * data['JobLevel']

        # 7. Numeric encodings of two categorical stress factors, combined
        #    into a single work-intensity index.
        data['OvertimeNum'] = data['OverTime'].map({'No': 0, 'Yes': 1})
        data['TravelNum'] = data['BusinessTravel'].map({'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2})
        data['WorkIntensityIndex'] = data['OvertimeNum'] + data['TravelNum']

        # 8. Promotion potential: high rating + high involvement at a low level.
        data['Promotion_Potential'] = (data['PerformanceRating'] * data['JobInvolvement']) / (data['JobLevel'] + 1)

        return data

    def load_data(self, train_path, test_path=None):
        """Load the train (and optionally test) CSV files.

        Returns ``(train_df, test_df)``; ``test_df`` is ``None`` when no
        ``test_path`` is given.
        """
        train_data = pd.read_csv(train_path)
        test_data = pd.read_csv(test_path) if test_path else None
        return train_data, test_data

    def analyze_features(self, data, target_col='Attrition'):
        """Univariate feature screening against the binary target.

        Chi-square tests for categorical columns, two-sample t-tests for
        numeric columns. Results are printed; returns ``(X, y)``.
        """
        print("=== 特征重要性分析 ===")

        # Separate features from the target; drop configured junk columns.
        X = data.drop(columns=[target_col] + self.config['drop_columns'], errors='ignore')
        y = data[target_col]

        # Categorical features: chi-square test of independence.
        for col in X.select_dtypes(include=['object']).columns:
            contingency_table = pd.crosstab(X[col], y)
            chi2, p_value = stats.chi2_contingency(contingency_table)[:2]
            print(f"{col:25s} 卡方值: {chi2:.4f}, P值: {p_value:.4f}")

        # Numeric features: two-sample t-test between the two target classes.
        # Derive the class labels from the data instead of assuming a 0/1
        # encoding, so a string target ('No'/'Yes') works as well — the old
        # hard-coded y == 0 / y == 1 produced empty groups (NaN t-stats)
        # for string labels.
        classes = sorted(pd.unique(y))
        if len(classes) == 2:
            neg, pos = classes
            # np.number also catches int32/float32 columns.
            for col in X.select_dtypes(include=[np.number]).columns:
                group1 = X.loc[y == neg, col]
                group2 = X.loc[y == pos, col]
                t_stat, p_value = stats.ttest_ind(group1, group2)
                print(f"{col:25s} t={t_stat:.4f}, P值={p_value:.4f}")

        return X, y

    def preprocess_data(self, train_data, test_data=None):
        """Preprocess train/test data (feature engineering included).

        When ``test_data`` is ``None``, a stratified hold-out split is
        carved from ``train_data``. Returns
        ``(X_train, X_test, y_train, y_test)`` with X already scaled /
        one-hot encoded (and feature-selected when configured).
        """
        print("=== 数据预处理与特征工程 ===")

        # Step 1: engineered combination features.
        # NOTE(review): group-based features (income z-score per JobRole,
        # per-level income medians) are computed independently on each
        # split, so the test set uses its own statistics rather than the
        # training-set ones — confirm this is intended.
        train_data = self.create_features(train_data)

        if test_data is not None:
            test_data = self.create_features(test_data)  # same treatment for the test set
        else:
            # Carve a stratified hold-out set from the training data.
            train_data, test_data = train_test_split(
                train_data,
                test_size=self.config['test_size'],
                random_state=self.config['random_state'],
                stratify=train_data[self.config['target_column']]
            )

        # Separate features from the target.
        X_train = train_data.drop(columns=[self.config['target_column']] + self.config['drop_columns'], errors='ignore')
        y_train = train_data[self.config['target_column']]

        X_test = test_data.drop(columns=[self.config['target_column']] + self.config['drop_columns'], errors='ignore')
        y_test = test_data[self.config['target_column']]

        # Build the preprocessor; np.number picks up every numeric dtype
        # (the old ['int64', 'float64'] filter silently dropped
        # int32/float32 columns), and new engineered columns are detected
        # automatically.
        numeric_cols = X_train.select_dtypes(include=[np.number]).columns.tolist()
        categorical_cols = X_train.select_dtypes(include=['object']).columns.tolist()

        self.preprocessor = ColumnTransformer(
            transformers=[
                ('num', StandardScaler(), numeric_cols),
                # handle_unknown='ignore': unseen test categories become all-zero rows
                ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_cols)
            ]
        )

        # Fit on train only; transform both splits with the same statistics.
        X_train_processed = self.preprocessor.fit_transform(X_train)
        X_test_processed = self.preprocessor.transform(X_test)

        # Optional univariate feature selection.
        if self.config['feature_selection']['method'] == 'kbest':
            k = self.config['feature_selection'].get('k', 20)
            self.feature_selector = SelectKBest(score_func=f_classif, k=k)
            X_train_selected = self.feature_selector.fit_transform(X_train_processed, y_train)
            X_test_selected = self.feature_selector.transform(X_test_processed)

            # Record per-feature F-scores / p-values for later inspection.
            feature_names = self._get_feature_names()
            self.feature_importance = pd.DataFrame({
                'feature': feature_names,
                'score': self.feature_selector.scores_,
                'p_value': self.feature_selector.pvalues_
            }).sort_values('score', ascending=False)

            print(f"特征选择后保留 {X_train_selected.shape[1]} 个特征")
        else:
            X_train_selected = X_train_processed
            X_test_selected = X_test_processed
            print(f"未进行特征选择，保留 {X_train_selected.shape[1]} 个特征（含组合特征）")

        return X_train_selected, X_test_selected, y_train, y_test

    def _get_feature_names(self):
        """Return post-transform feature names (with OneHot expansion)."""
        if self.preprocessor is None:
            return []

        feature_names = []

        # Numeric feature names.
        if hasattr(self.preprocessor.named_transformers_['num'], 'get_feature_names_out'):
            num_names = self.preprocessor.named_transformers_['num'].get_feature_names_out()
            feature_names.extend(num_names)
        else:
            # Older sklearn StandardScaler: fall back to the raw column
            # list stored on the ColumnTransformer.
            num_cols = self.preprocessor.transformers_[0][2]
            feature_names.extend(num_cols)

        # Categorical feature names (expanded by OneHotEncoder).
        cat_names = self.preprocessor.named_transformers_['cat'].get_feature_names_out()
        feature_names.extend(cat_names)

        return feature_names

    def save_preprocessor(self, path):
        """Persist the fitted preprocessor (and selector, if any) via joblib."""
        # A bare filename has an empty dirname; os.makedirs('') would raise.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)
        joblib.dump(self.preprocessor, path)
        # Explicit None check: sklearn estimators should not be truth-tested.
        if self.feature_selector is not None:
            joblib.dump(self.feature_selector, path.replace('.pkl', '_selector.pkl'))

    def load_preprocessor(self, path):
        """Load a previously saved preprocessor (and selector, if present)."""
        self.preprocessor = joblib.load(path)
        selector_path = path.replace('.pkl', '_selector.pkl')
        if os.path.exists(selector_path):
            self.feature_selector = joblib.load(selector_path)

    def get_feature_importance(self):
        """Return the feature-importance DataFrame (None unless 'kbest' ran)."""
        return self.feature_importance

    def plot_feature_importance(self, top_n=20):
        """Plot the top-``top_n`` features by F-score and save as PNG."""
        if self.feature_importance is None:
            print("没有特征重要性数据")
            return

        top_features = self.feature_importance.head(top_n)

        plt.figure(figsize=(10, 8))
        sns.barplot(data=top_features, x='score', y='feature')
        plt.title(f'Top {top_n} 特征重要性')
        plt.xlabel('F-score')
        plt.tight_layout()

        # Save to the shared results directory.
        os.makedirs('../results/figures', exist_ok=True)
        plt.savefig('../results/figures/feature_importance.png', dpi=300, bbox_inches='tight')
        plt.close()

        print(f"特征重要性图已保存到 ../results/figures/feature_importance.png")


def main():
    """Exercise the DataProcessor end to end as a smoke test."""
    processor = DataProcessor()

    # Raw CSVs (paths are relative to this script's expected location).
    train_data, test_data = processor.load_data('../data/train.csv', '../data/test2.csv')

    # Univariate screening of the raw features (prints a report).
    processor.analyze_features(train_data)

    # Full preprocessing pipeline: feature engineering + encoding.
    X_train, X_test, y_train, y_test = processor.preprocess_data(train_data, test_data)
    print(f"训练集形状: {X_train.shape}")
    print(f"测试集形状: {X_test.shape}")

    # Persist the fitted transformers, then plot feature importances.
    processor.save_preprocessor('../models/preprocessor.pkl')
    processor.plot_feature_importance()


if __name__ == '__main__':
    main()