import pandas as pd
import numpy as np
import logging
import os
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import chi2, SelectKBest, mutual_info_classif
from sklearn.preprocessing import LabelEncoder, StandardScaler
from imblearn.over_sampling import SMOTE, ADASYN
from collections import Counter

from base import getEnvPath

# Logging configuration: INFO level, mirrored to a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('data_processing.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


def load_data(file_path):
    """Load the dataset from a CSV file and log a first-pass summary.

    Logs row/column counts, the column names, and the distribution of the
    'Label' column.

    Args:
        file_path: Path of the CSV file to read.

    Returns:
        The loaded DataFrame.

    Raises:
        FileNotFoundError: If ``file_path`` does not exist.
    """
    logger.info(f"开始加载数据集: {file_path}")

    if not os.path.exists(file_path):
        logger.error(f"文件不存在: {file_path}")
        raise FileNotFoundError(f"文件不存在: {file_path}")

    df = pd.read_csv(file_path)

    n_rows, n_cols = df.shape
    logger.info(f"数据集总行数: {n_rows}, 总列数: {n_cols}")
    logger.info(f"列名: {', '.join(df.columns.tolist())}")

    # Summarize the class balance of the raw data.
    logger.info("原始标签分布统计:")
    for label, count in df['Label'].value_counts().items():
        logger.info(f"  - {label}: {count} ({count / n_rows * 100:.2f}%)")

    return df


def clean_data(df):
    """Clean the dataset: missing values, duplicates, outliers, encoding.

    Numeric NaNs are filled with the column median and categorical NaNs
    with the column mode.  Duplicate rows are dropped in place, numeric
    outliers (1.5*IQR rule) are clipped to the IQR fences, and object-typed
    feature columns are label-encoded.  The 'Label' column is never clipped
    or encoded.

    Args:
        df: Input DataFrame; modified in place and also returned.

    Returns:
        Tuple of (cleaned DataFrame, dict mapping column name -> fitted
        LabelEncoder for each encoded categorical feature).
    """
    logger.info("开始数据清洗...")

    original_rows = df.shape[0]
    logger.info(f"原始数据行数: {original_rows}")

    # --- Missing values ---
    missing_values = df.isnull().sum()
    missing_cols = missing_values[missing_values > 0]
    if len(missing_cols) > 0:
        logger.info("发现缺失值:")
        for col, count in missing_cols.items():
            logger.info(f"  - {col}: {count} 个缺失值 ({count / original_rows * 100:.2f}%)")

        numeric_cols = df.select_dtypes(include=['int64', 'float64']).columns
        categorical_cols = df.select_dtypes(include=['object']).columns

        for col in numeric_cols:
            if missing_values[col] > 0:
                median_value = df[col].median()
                # Assign back instead of fillna(inplace=True) on a column
                # slice: the chained in-place form is deprecated and may
                # silently fail to propagate under pandas copy-on-write.
                df[col] = df[col].fillna(median_value)
                logger.info(f"  - 已用中位数 {median_value} 填充 {col} 的缺失值")

        for col in categorical_cols:
            if missing_values[col] > 0:
                mode_value = df[col].mode()[0]
                df[col] = df[col].fillna(mode_value)
                logger.info(f"  - 已用众数 '{mode_value}' 填充 {col} 的缺失值")
    else:
        logger.info("数据集中没有缺失值")

    # --- Duplicate rows ---
    duplicates = df.duplicated().sum()
    if duplicates > 0:
        logger.info(f"发现 {duplicates} 行重复数据 ({duplicates / original_rows * 100:.2f}%)")
        df.drop_duplicates(inplace=True)
        logger.info(f"已删除重复数据，剩余 {df.shape[0]} 行")
    else:
        logger.info("数据集中没有重复行")

    # --- Outliers: clip numeric features to the 1.5*IQR fences ---
    numeric_cols = df.select_dtypes(include=['int64', 'float64']).columns
    outlier_counts = {}

    for col in numeric_cols:
        # Never clip the target column.
        if col == 'Label':
            continue

        Q1 = df[col].quantile(0.25)
        Q3 = df[col].quantile(0.75)
        IQR = Q3 - Q1
        lower_bound = Q1 - 1.5 * IQR
        upper_bound = Q3 + 1.5 * IQR

        # Count outliers with a boolean mask; no need to materialize the rows.
        outlier_count = ((df[col] < lower_bound) | (df[col] > upper_bound)).sum()

        if outlier_count > 0:
            outlier_counts[col] = outlier_count
            # Winsorize rather than drop: keep the row, cap the value.
            df[col] = df[col].clip(lower=lower_bound, upper=upper_bound)

    if outlier_counts:
        logger.info("发现并处理了以下特征的异常值:")
        for col, count in outlier_counts.items():
            logger.info(f"  - {col}: {count} 个异常值 ({count / original_rows * 100:.2f}%)")
    else:
        logger.info("未发现明显的异常值")

    # --- Encode categorical features (target column excluded) ---
    categorical_cols = df.select_dtypes(include=['object']).columns
    label_encoders = {}

    for col in categorical_cols:
        if col != 'Label':
            le = LabelEncoder()
            df[col] = le.fit_transform(df[col])
            label_encoders[col] = le
            logger.info(f"已对分类特征 '{col}' 进行标签编码")

    logger.info(f"数据清洗完成，最终数据集大小: {df.shape[0]} 行, {df.shape[1]} 列")
    return df, label_encoders


def select_features(df, target_col='Label', method='both', k=20):
    """Select informative features via chi-squared test and/or mutual information.

    Args:
        df: DataFrame holding the feature columns and the target column.
        target_col: Name of the target column.
        method: 'chi2', 'mutual_info', or 'both' (union of the two top-k lists).
        k: Number of top-ranked features kept by each method.

    Returns:
        Tuple of (DataFrame reduced to the selected features plus the target
        column, list of the selected feature names).

    Raises:
        ValueError: If ``method`` is not one of the supported values.
    """
    # Fail fast on an unknown method instead of silently selecting nothing.
    if method not in ('chi2', 'mutual_info', 'both'):
        raise ValueError(f"不支持的特征选择方法: {method}")

    logger.info("开始特征选择...")

    # Separate features and target.
    X = df.drop(columns=[target_col])
    y = df[target_col]

    # Encode the (possibly string) labels as integers.
    le = LabelEncoder()
    y_encoded = le.fit_transform(y)

    # Standardize the features.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    feature_names = X.columns.tolist()
    selected_features = []

    if method in ('chi2', 'both'):
        logger.info("使用卡方检验进行特征选择...")
        # chi2 requires non-negative inputs; shift each column so its
        # minimum becomes zero.
        X_chi2 = X_scaled - X_scaled.min(axis=0)

        chi2_selector = SelectKBest(chi2, k=k)
        chi2_selector.fit(X_chi2, y_encoded)

        chi2_feature_importance = pd.DataFrame({
            'Feature': feature_names,
            'Chi2_Score': chi2_selector.scores_,
            'Chi2_P_Value': chi2_selector.pvalues_
        }).sort_values('Chi2_Score', ascending=False)

        logger.info("卡方检验特征重要性排名 (前10):")
        for i, (_, row) in enumerate(chi2_feature_importance.head(10).iterrows()):
            logger.info(f"  {i + 1}. {row['Feature']}: 得分={row['Chi2_Score']:.4f}, p值={row['Chi2_P_Value']:.6f}")

        selected_features.extend(chi2_feature_importance.head(k)['Feature'].tolist())

    if method in ('mutual_info', 'both'):
        logger.info("使用互信息方法进行特征选择...")

        # Subsample very large datasets to keep the MI estimate tractable.
        # A seeded generator makes the subsample (and hence the selected
        # features) reproducible across runs, matching the random_state=42
        # convention used elsewhere in this file.
        sample_size = min(10000, X_scaled.shape[0])
        if X_scaled.shape[0] > sample_size:
            logger.info(f"数据量较大，从{X_scaled.shape[0]}行随机采样{sample_size}行进行互信息计算")
            rng = np.random.default_rng(42)
            indices = rng.choice(X_scaled.shape[0], sample_size, replace=False)
            X_sample = X_scaled[indices]
            y_sample = y_encoded[indices]
        else:
            X_sample = X_scaled
            y_sample = y_encoded

        try:
            logger.info("开始计算互信息，这可能需要一些时间...")
            # NOTE(review): n_jobs on mutual_info_classif requires
            # scikit-learn >= 1.5; on older versions the TypeError lands in
            # the except below and only chi2 results (if any) are used.
            mi_scores = mutual_info_classif(X_sample, y_sample, n_jobs=-1, random_state=42)

            mi_feature_importance = pd.DataFrame({
                'Feature': feature_names,
                'Mutual_Info_Score': mi_scores
            }).sort_values('Mutual_Info_Score', ascending=False)

            logger.info("互信息特征重要性排名 (前10):")
            for i, (_, row) in enumerate(mi_feature_importance.head(10).iterrows()):
                logger.info(f"  {i + 1}. {row['Feature']}: 得分={row['Mutual_Info_Score']:.4f}")

            selected_features.extend(mi_feature_importance.head(k)['Feature'].tolist())
        except Exception as e:
            logger.error(f"互信息计算失败: {str(e)}")
            logger.warning("跳过互信息特征选择，仅使用卡方检验结果")
            if method == 'mutual_info':
                # No chi2 fallback available: keep every feature.
                logger.warning("由于互信息计算失败且未指定其他方法，将使用所有特征")
                selected_features = feature_names

    if method == 'both':
        # Union of the two rankings, sorted so the resulting column order is
        # deterministic (a bare list(set(...)) varies with hash randomization).
        selected_features = sorted(set(selected_features))

    logger.info(f"特征选择完成，选择了 {len(selected_features)} 个特征")

    # Return the reduced DataFrame, always keeping the target column last.
    selected_df = df[selected_features + [target_col]]
    return selected_df, selected_features


def balance_data(df, target_col='Label', method='smote'):
    """Oversample minority classes to balance the dataset.

    Args:
        df: DataFrame with feature columns and the target column.
        target_col: Name of the target column.
        method: Oversampling algorithm, 'smote' or 'adasyn' (case-insensitive).

    Returns:
        A new, balanced DataFrame with the same columns as ``df``.

    Raises:
        ValueError: If ``method`` is not supported.
    """
    logger.info("开始数据平衡处理...")

    X = df.drop(columns=[target_col])
    y = df[target_col]

    # Log the class distribution before resampling.
    total = len(y)
    logger.info("原始类别分布:")
    for label, count in Counter(y).items():
        logger.info(f"  - {label}: {count} ({count / total * 100:.2f}%)")

    # Dispatch table: sampler class plus its announcement message.
    samplers = {
        'smote': (SMOTE, "使用SMOTE方法进行过采样..."),
        'adasyn': (ADASYN, "使用ADASYN方法进行过采样..."),
    }
    key = method.lower()
    if key not in samplers:
        logger.error(f"不支持的平衡方法: {method}")
        raise ValueError(f"不支持的平衡方法: {method}")

    sampler_cls, announcement = samplers[key]
    logger.info(announcement)
    sampler = sampler_cls(random_state=42)

    X_resampled, y_resampled = sampler.fit_resample(X, y)

    # Log the class distribution after resampling.
    resampled_total = len(y_resampled)
    logger.info("平衡后的类别分布:")
    for label, count in Counter(y_resampled).items():
        logger.info(f"  - {label}: {count} ({count / resampled_total * 100:.2f}%)")

    balanced_df = pd.DataFrame(X_resampled, columns=X.columns)
    balanced_df[target_col] = y_resampled

    logger.info(f"数据平衡完成，平衡后数据集大小: {balanced_df.shape[0]} 行, {balanced_df.shape[1]} 列")
    return balanced_df


def split_data(df, target_col='Label', test_size=0.2):
    """Split the dataset into stratified train and validation sets.

    Args:
        df: DataFrame with feature columns and the target column.
        target_col: Name of the target column used for stratification.
        test_size: Fraction of rows assigned to the validation set.

    Returns:
        Tuple of (train DataFrame, validation DataFrame), each containing
        the features plus the target column.
    """
    logger.info("开始数据集分割...")

    features = df.drop(columns=[target_col])
    labels = df[target_col]

    # Stratified split keeps the label proportions in both subsets.
    X_train, X_val, y_train, y_val = train_test_split(
        features, labels, test_size=test_size, random_state=42, stratify=labels
    )

    train_df = pd.DataFrame(X_train, columns=features.columns)
    train_df[target_col] = y_train

    val_df = pd.DataFrame(X_val, columns=features.columns)
    val_df[target_col] = y_val

    total_rows = df.shape[0]
    logger.info(f"数据集分割完成:")
    logger.info(f"  - 训练集: {train_df.shape[0]} 行 ({train_df.shape[0] / total_rows * 100:.2f}%)")
    logger.info(f"  - 验证集: {val_df.shape[0]} 行 ({val_df.shape[0] / total_rows * 100:.2f}%)")

    # Log per-split label distributions as a sanity check.
    logger.info("训练集标签分布:")
    for label, prop in train_df[target_col].value_counts(normalize=True).items():
        count = train_df[train_df[target_col] == label].shape[0]
        logger.info(f"  - {label}: {count} ({prop * 100:.2f}%)")

    logger.info("验证集标签分布:")
    for label, prop in val_df[target_col].value_counts(normalize=True).items():
        count = val_df[val_df[target_col] == label].shape[0]
        logger.info(f"  - {label}: {count} ({prop * 100:.2f}%)")

    return train_df, val_df


def verify_data_processing(train_df, val_df, original_df):
    """Sanity-check the processed splits against the original dataset.

    Logs row counts, verifies that both splits have identical columns, that
    their indices do not overlap, and compares label proportions between the
    two splits (a gap of 5 percentage points or more is flagged as a warning).
    All checks only log; nothing is raised.

    Args:
        train_df: Training split.
        val_df: Validation split.
        original_df: The dataset as loaded, before any processing.

    Returns:
        True, always.
    """
    logger.info("开始验证数据处理结果...")

    # Row-count comparison.
    total_processed = train_df.shape[0] + val_df.shape[0]
    logger.info(f"原始数据集行数: {original_df.shape[0]}")
    logger.info(f"处理后数据集总行数: {total_processed}")

    # Column consistency between the two splits.
    train_features = set(train_df.columns)
    val_features = set(val_df.columns)
    if train_features == val_features:
        logger.info("训练集和验证集的特征一致")
    else:
        logger.error("训练集和验证集的特征不一致")
        logger.error(f"训练集特有特征: {train_features - val_features}")
        logger.error(f"验证集特有特征: {val_features - train_features}")

    # Index overlap would indicate data leakage between the splits.
    overlap = set(train_df.index) & set(val_df.index)
    if not overlap:
        logger.info("训练集和验证集没有重叠样本")
    else:
        logger.error(f"发现 {len(overlap)} 个重叠样本")

    # Compare label proportions split by split.
    train_label_dist = train_df['Label'].value_counts(normalize=True)
    val_label_dist = val_df['Label'].value_counts(normalize=True)

    logger.info("标签分布比较 (训练集 vs 验证集):")
    for label in sorted(set(train_label_dist.index) | set(val_label_dist.index)):
        train_pct = train_label_dist.get(label, 0) * 100
        val_pct = val_label_dist.get(label, 0) * 100
        diff = abs(train_pct - val_pct)
        status = "正常" if diff < 5 else "警告"
        logger.info(f"  - {label}: 训练集={train_pct:.2f}%, 验证集={val_pct:.2f}%, 差异={diff:.2f}% ({status})")

    logger.info("数据处理验证完成")
    return True


def main():
    """Run the full preprocessing pipeline.

    Steps: load, clean, select features (with a Unix-only wall-clock
    timeout), split, verify, and persist the resulting datasets plus the
    selected-feature list under ``data/output_dir``.
    """
    path = getEnvPath()
    data_file = path + "data/dataset_train.csv"

    # 1. Load the raw dataset; keep an untouched copy for later verification.
    df = load_data(data_file)
    original_df = df.copy()

    # 2. Clean (the returned encoders are not needed downstream here).
    cleaned_df, _ = clean_data(df)

    # 3. Feature selection, guarded by a timeout where the platform allows.
    try:
        import signal

        class TimeoutException(Exception):
            pass

        def timeout_handler(signum, frame):
            raise TimeoutException("特征选择超时")

        feature_selection_timeout = 300  # seconds (5 minutes)

        # signal.SIGALRM does not exist on Windows, so the timeout is
        # armed only on Unix-like systems.
        use_alarm = os.name != 'nt'
        if use_alarm:
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(feature_selection_timeout)

        try:
            selected_df, selected_features = select_features(cleaned_df, method='both', k=30)
        except TimeoutException:
            # Fall back to the cheaper chi2-only selection.
            logger.warning(f"特征选择超时（超过{feature_selection_timeout}秒），使用卡方检验方法")
            selected_df, selected_features = select_features(cleaned_df, method='chi2', k=30)
        finally:
            if use_alarm:
                signal.alarm(0)  # disarm any pending alarm
    except Exception as e:
        # Any other failure: proceed with the full feature set.
        logger.error(f"特征选择失败: {str(e)}")
        logger.warning("跳过特征选择，使用所有特征")
        selected_df = cleaned_df
        selected_features = cleaned_df.drop(columns=['Label']).columns.tolist()

    # 4. Class balancing (currently disabled).
    # balanced_df = balance_data(selected_df, method='smote')

    # 5. Train/validation split.
    train_df, val_df = split_data(selected_df)

    # 6. Verify the processing results.
    verify_data_processing(train_df, val_df, original_df)

    # 7. Persist the outputs.
    output_dir = path + "data/output_dir"
    os.makedirs(output_dir, exist_ok=True)

    train_df.to_csv(f"{output_dir}/train_data.csv", index=False)
    val_df.to_csv(f"{output_dir}/validation_data.csv", index=False)

    # Save the selected feature names, one per line.
    with open(f"{output_dir}/selected_features.txt", 'w') as f:
        f.writelines(f"{feature}\n" for feature in selected_features)

    logger.info(f"处理后的数据已保存到 {output_dir} 目录")
    logger.info("数据预处理与特征工程流程完成")


if __name__ == "__main__":
    main()
