import pandas as pd
import numpy as np
import logging
import os
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import chi2, SelectKBest, mutual_info_classif, VarianceThreshold
from sklearn.preprocessing import LabelEncoder, StandardScaler
from imblearn.over_sampling import SMOTE, ADASYN
from collections import Counter

from base import getEnvPath

# Configure logging: emit to both a log file and the console with one format.
_log_handlers = [
    logging.FileHandler('data_processing.log'),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=_log_handlers,
)
logger = logging.getLogger(__name__)


def load_data(file_path):
    """Load a CSV dataset from *file_path* and log its basic shape.

    Raises:
        FileNotFoundError: if *file_path* does not exist.

    Returns:
        pandas.DataFrame: the loaded dataset.
    """
    logger.info(f"开始加载数据集: {file_path}")

    # Fail fast with a clear message when the path is missing.
    if not os.path.exists(file_path):
        logger.error(f"文件不存在: {file_path}")
        raise FileNotFoundError(f"文件不存在: {file_path}")

    dataset = pd.read_csv(file_path)

    # Log basic dimensions and the column names for traceability.
    n_rows, n_cols = dataset.shape
    logger.info(f"数据集总行数: {n_rows}, 总列数: {n_cols}")
    logger.info(f"列名: {', '.join(dataset.columns.tolist())}")

    return dataset


def clean_data(df):
    """Clean the dataset: missing values, duplicates, outliers, encoding.

    Missing numeric values are filled with the column median, missing
    categorical values with the column mode.  Duplicate rows are dropped,
    numeric outliers (1.5*IQR rule) are clipped to the fences, and every
    object-dtype feature except 'Label' is label-encoded.

    NOTE: *df* is modified in place and also returned.

    Args:
        df (pandas.DataFrame): raw dataset; may contain a 'Label' column
            that is excluded from outlier clipping and encoding.

    Returns:
        tuple[pandas.DataFrame, dict]: the cleaned frame and a mapping of
        column name -> fitted LabelEncoder (for later inverse transforms).
    """
    logger.info("开始数据清洗...")

    original_rows = df.shape[0]
    logger.info(f"原始数据行数: {original_rows}")

    # --- Missing values ---------------------------------------------------
    missing_values = df.isnull().sum()
    missing_cols = missing_values[missing_values > 0]
    if len(missing_cols) > 0:
        logger.info("发现缺失值:")
        for col, count in missing_cols.items():
            logger.info(f"  - {col}: {count} 个缺失值 ({count / original_rows * 100:.2f}%)")

        # np.number also covers int32/float32 etc., not only 64-bit dtypes.
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        categorical_cols = df.select_dtypes(include=['object']).columns

        for col in numeric_cols:
            if missing_values[col] > 0:
                median_value = df[col].median()
                # Assign back instead of fillna(..., inplace=True) on a
                # column: the in-place chained form is deprecated and is a
                # no-op under pandas copy-on-write semantics.
                df[col] = df[col].fillna(median_value)
                logger.info(f"  - 已用中位数 {median_value} 填充 {col} 的缺失值")

        for col in categorical_cols:
            if missing_values[col] > 0:
                mode_value = df[col].mode()[0]
                df[col] = df[col].fillna(mode_value)
                logger.info(f"  - 已用众数 '{mode_value}' 填充 {col} 的缺失值")
    else:
        logger.info("数据集中没有缺失值")

    # --- Duplicate rows ---------------------------------------------------
    duplicates = df.duplicated().sum()
    if duplicates > 0:
        logger.info(f"发现 {duplicates} 行重复数据 ({duplicates / original_rows * 100:.2f}%)")
        df.drop_duplicates(inplace=True)
        logger.info(f"已删除重复数据，剩余 {df.shape[0]} 行")
    else:
        logger.info("数据集中没有重复行")

    # --- Outliers: detect with the IQR rule, then winsorize ---------------
    numeric_cols = df.select_dtypes(include=[np.number]).columns
    outlier_counts = {}

    for col in numeric_cols:
        # Never clip the target column.
        if col == 'Label':
            continue

        Q1 = df[col].quantile(0.25)
        Q3 = df[col].quantile(0.75)
        IQR = Q3 - Q1
        lower_bound = Q1 - 1.5 * IQR
        upper_bound = Q3 + 1.5 * IQR

        # Count outliers without materializing the offending rows.
        outlier_count = int(((df[col] < lower_bound) | (df[col] > upper_bound)).sum())

        if outlier_count > 0:
            outlier_counts[col] = outlier_count
            # Clamp to the fences rather than dropping rows, so the row
            # count (and alignment with any labels) is preserved.
            df[col] = df[col].clip(lower=lower_bound, upper=upper_bound)

    if outlier_counts:
        logger.info("发现并处理了以下特征的异常值:")
        for col, count in outlier_counts.items():
            logger.info(f"  - {col}: {count} 个异常值 ({count / original_rows * 100:.2f}%)")
    else:
        logger.info("未发现明显的异常值")

    # --- Encode categorical features (the label column is left as-is) -----
    categorical_cols = df.select_dtypes(include=['object']).columns
    label_encoders = {}

    for col in categorical_cols:
        if col != 'Label':
            le = LabelEncoder()
            df[col] = le.fit_transform(df[col])
            label_encoders[col] = le
            logger.info(f"已对分类特征 '{col}' 进行标签编码")

    logger.info(f"数据清洗完成，最终数据集大小: {df.shape[0]} 行, {df.shape[1]} 列")
    return df, label_encoders


def main():
    """Run the full test-data preprocessing pipeline.

    Steps: load the raw CSV, clean it, keep only the features listed in
    selected_features.txt (produced by the training pipeline), and write
    the result to the output directory.
    """
    path = getEnvPath()
    data_file = path + "data/dataset_test.csv"

    # 1. Load the raw dataset.
    df = load_data(data_file)

    # 2. Clean it (the fitted label encoders are not needed here).
    cleaned_df, _ = clean_data(df)

    # 3. Feature selection: reuse the feature list saved by the training
    #    pipeline so the test columns match the training columns.
    with open(path + "data/output_dir/selected_features.txt", "r") as f:
        selected_features = [line.rstrip("\n") for line in f]

    # Index the *cleaned* frame, not the raw one.  The original code used
    # `df` here and only worked because clean_data mutates in place.
    selected_df = cleaned_df[selected_features]

    # 4. Persist the processed data.
    output_dir = path + "data/output_dir"
    os.makedirs(output_dir, exist_ok=True)
    selected_df.to_csv(f"{output_dir}/test_data.csv", index=False)

    logger.info("测试数据预处理完成")


if __name__ == "__main__":
    main()
