# -*- coding: utf-8 -*-
"""
@Time ： 2025/10/14 14:51
@Auth ： zfw

"""
import pandas as pd
import numpy as np
from scipy import stats
from concurrent.futures import ThreadPoolExecutor, as_completed
import logging
import gc


def calculate_feature_importance(specific_df: pd.DataFrame,
                                 full_df: pd.DataFrame,
                                 n_top_features: int = 50,
                                 n_jobs: int = 4) -> list:
    """Rank features by how strongly they separate a specific cohort from the full population.

    Each common column is scored by ``calculate_feature_score`` (standardized
    mean difference for numeric columns, normalized chi-square for categorical
    ones) and the names of the highest-scoring columns are returned.

    Args:
        specific_df (pd.DataFrame): frame for the specific cohort.
        full_df (pd.DataFrame): frame for the whole population.
        n_top_features (int): number of top features to return (default 50).
        n_jobs (int): number of worker threads for scoring (default 4).

    Returns:
        list: names of the highest-scoring features, best first.

    Raises:
        Exception: any failure outside per-feature scoring is logged and re-raised.
    """
    logger = logging.getLogger('FeatureImportanceCalculator')
    logger.setLevel(logging.INFO)
    # Attach the stream handler only once: adding a new handler on every call
    # (as the previous version did) duplicates every log line on repeat runs.
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    logger.info("开始计算特征重要性...")

    try:
        # 1. Preprocess copies of both frames (preprocess_data mutates its input).
        logger.info("开始数据预处理...")
        specific_df = preprocess_data(specific_df.copy())
        full_df = preprocess_data(full_df.copy())

        # 2. Restrict both frames to the shared columns. Sort for a
        #    deterministic evaluation order (set intersection order is arbitrary).
        common_cols = sorted(set(specific_df.columns) & set(full_df.columns))
        specific_df = specific_df[common_cols]
        full_df = full_df[common_cols]
        logger.info(f"共有 {len(common_cols)} 个共同特征需要评估")

        # 3. Score each feature's group difference in parallel.
        logger.info("开始计算特征群体差异分数...")
        feature_scores = {}

        with ThreadPoolExecutor(max_workers=n_jobs) as executor:
            futures = {
                executor.submit(
                    calculate_feature_score, col,
                    specific_df[col], full_df[col]
                ): col
                for col in common_cols
            }

            for future in as_completed(futures):
                col = futures[future]
                try:
                    feature_scores[col] = future.result()
                except Exception as e:
                    # One failing feature must not abort the whole run;
                    # score it 0 so it simply never ranks at the top.
                    logger.error(f"处理特征 {col} 时出错: {str(e)}")
                    feature_scores[col] = 0

        # 4. Sort descending by score and keep the top n names.
        sorted_features = sorted(feature_scores.items(), key=lambda x: x[1], reverse=True)
        top_features = [feat for feat, score in sorted_features[:n_top_features]]

        logger.info(f"Top {n_top_features} 特征已选出")

        return top_features

    except Exception as e:
        logger.error(f"特征重要性计算失败: {str(e)}")
        raise
    finally:
        # Free the preprocessed copies promptly.
        gc.collect()


def preprocess_data(df: pd.DataFrame) -> pd.DataFrame:
    """Coerce a DataFrame into a purely numeric representation in place.

    Transformations applied:
        - datetime columns -> Unix timestamp in seconds (NaT -> NaN)
        - object columns with >100 unique values -> frequency encoding
        - other object columns -> categorical integer codes
        - integer columns -> int32, only when every value fits without overflow
        - float64 columns -> float32
        - Decimal-dtype columns -> float32

    Args:
        df (pd.DataFrame): input frame; modified and returned.

    Returns:
        pd.DataFrame: the same frame, numerically encoded.
    """
    # Datetime features -> epoch seconds.
    date_cols = df.select_dtypes(include=['datetime64', 'datetime']).columns
    for col in date_cols:
        if pd.api.types.is_datetime64_any_dtype(df[col]):
            # astype('int64') on a column containing NaT either raises (pandas
            # >= 2) or yields a garbage huge-negative value, so convert only
            # the non-null entries and keep NaT as NaN.
            secs = pd.Series(np.nan, index=df.index, dtype='float64')
            mask = df[col].notna()
            secs[mask] = df[col][mask].astype('int64') // 10 ** 9
            df[col] = secs
        else:
            # Python datetime objects stored in an object-dtype column.
            df[col] = df[col].apply(lambda x: x.timestamp() if pd.notnull(x) else np.nan)

    # String features.
    str_cols = df.select_dtypes(include='object').columns
    for col in str_cols:
        if df[col].nunique() > 100:
            # High-cardinality: frequency encoding keeps a single numeric column.
            freq = df[col].value_counts()
            df[col] = df[col].map(freq)
        else:
            # Low-cardinality: stable integer category codes (NaN -> -1).
            df[col] = df[col].astype('category').cat.codes

    # Downcast integers only when the values fit in int32: a blind astype
    # silently wraps values outside [-2**31, 2**31), corrupting the data.
    i32 = np.iinfo(np.int32)
    for col in df.select_dtypes(include=['int32', 'int64']).columns:
        if df[col].empty or (df[col].min() >= i32.min and df[col].max() <= i32.max):
            df[col] = df[col].astype('int32')

    # Halve float memory; float32 precision is sufficient for scoring.
    for col in df.select_dtypes(include='float64').columns:
        df[col] = df[col].astype('float32')

    # Decimal-typed columns (e.g. from a database driver) -> float32.
    dec_cols = [c for c in df.columns if 'decimal' in str(df[c].dtype).lower()]
    for col in dec_cols:
        df[col] = df[col].astype('float32')

    return df


def calculate_feature_score(col: str, group1: pd.Series, group2: pd.Series) -> float:
    """计算单个特征的群体差异分数"""
    try:
        # 处理缺失值
        group1 = group1.dropna()
        group2 = group2.dropna()

        if len(group1) == 0 or len(group2) == 0:
            return 0

        # 数值型特征：使用标准化均值差异
        if np.issubdtype(group1.dtype, np.number):
            if len(group1) < 2 or len(group2) < 2:
                return 0

            mean_diff = np.abs(np.mean(group1) - np.mean(group2))
            std1 = np.std(group1, ddof=1)
            std2 = np.std(group2, ddof=1)

            # 处理零标准差情况
            if std1 == 0 and std2 == 0:
                return mean_diff

            pooled_std = np.sqrt((std1 ** 2 + std2 ** 2) / 2)
            return mean_diff / pooled_std if pooled_std > 0 else mean_diff

        # 分类型特征：使用卡方统计量
        else:
            # 创建列联表
            cont_table = pd.crosstab(
                index=pd.Series(['Group1'] * len(group1) + ['Group2'] * len(group2)),
                columns=pd.concat([group1, group2]),
                dropna=False
            ).values

            # 计算卡方统计量
            try:
                chi2, _, _, _ = stats.chi2_contingency(cont_table)
                return chi2 / (len(group1) + len(group2))
            except:
                return 0
    except Exception as e:
        raise RuntimeError(f"计算特征 {col} 时出错: {str(e)}")


def handle_duplicate_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Make every column label unique by suffixing repeats.

    The first occurrence of a label keeps its name; the k-th repeat is
    renamed to ``"<name>_<k>"`` (e.g. ``x, x, x`` -> ``x, x_2, x_3``).
    Frames without duplicate labels are returned untouched.

    Args:
        df (pd.DataFrame): frame whose columns may contain duplicates;
            renamed in place.

    Returns:
        pd.DataFrame: the same frame with unique column labels.
    """
    # Fast path: nothing to do when all labels are already unique.
    if not df.columns.duplicated().any():
        return df

    duplicated_cols = df.columns[df.columns.duplicated()]

    # Walk the labels left to right, counting occurrences as we go.
    seen = {}
    renamed = []
    for name in df.columns:
        occurrence = seen.get(name, 0) + 1
        seen[name] = occurrence
        renamed.append(name if occurrence == 1 else f"{name}_{occurrence}")
    df.columns = renamed

    print(f"发现并处理了 {len(duplicated_cols)} 个重复列名")

    return df