#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
CSV文件数据清洗和特征提取示例
"""

import logging
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, StandardScaler

# Configure logging: timestamped, level-tagged messages at INFO and above.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')
# Module-level logger used by DataProcessor below.
logger = logging.getLogger(__name__)


class DataProcessor:
    """Load, clean, feature-engineer, and report on CSV data.

    Reads every CSV under ``input_dir``, merges them into one frame,
    cleans and transforms it, and writes the processed data plus summary
    artifacts (statistics CSV, correlation heatmap) to ``output_dir``.
    """

    def __init__(self, input_dir: str, output_dir: str):
        """
        Initialize the data processor.

        Args:
            input_dir: Directory containing the input CSV files.
            output_dir: Directory that receives the processed output files
                (created, including parents, if it does not exist).
        """
        self.input_dir = Path(input_dir)
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Reusable cleaning/scaling tools; they are (re-)fitted on whatever
        # data clean_data / extract_features receive.
        self.numeric_imputer = SimpleImputer(strategy='mean')
        self.categorical_imputer = SimpleImputer(strategy='most_frequent')
        self.scaler = StandardScaler()

    def load_data(self, file_pattern: str = "*.csv") -> pd.DataFrame:
        """
        Load every file matching ``file_pattern`` and concatenate them.

        Args:
            file_pattern: Glob pattern matched against ``input_dir``.

        Returns:
            One DataFrame with all matching files stacked row-wise and the
            index renumbered from 0.

        Raises:
            FileNotFoundError: If no file matches the pattern.
        """
        # sorted(): Path.glob yields files in filesystem-dependent order,
        # which would make the concatenation order non-deterministic.
        all_files = sorted(self.input_dir.glob(file_pattern))
        if not all_files:
            raise FileNotFoundError(f"在 {self.input_dir} 中没有找到匹配的CSV文件")

        dfs = []
        for file in all_files:
            logger.info(f"正在读取文件: {file}")
            dfs.append(pd.read_csv(file))

        return pd.concat(dfs, ignore_index=True)

    def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Clean the data: drop duplicate rows and impute missing values.

        Numeric columns are imputed with their mean, categorical (object
        dtype) columns with their most frequent value. The input frame is
        left unmodified.

        Args:
            df: Input DataFrame.

        Returns:
            A cleaned copy of the DataFrame.
        """
        # Explicit .copy() makes the working frame independent of `df`, so
        # the column assignments below can never trigger pandas'
        # SettingWithCopyWarning or write through to the caller's data.
        df_cleaned = df.drop_duplicates().copy()

        numeric_cols = df_cleaned.select_dtypes(include=[np.number]).columns
        categorical_cols = df_cleaned.select_dtypes(include=['object']).columns

        if len(numeric_cols) > 0:
            df_cleaned[numeric_cols] = self.numeric_imputer.fit_transform(
                df_cleaned[numeric_cols])

        if len(categorical_cols) > 0:
            df_cleaned[categorical_cols] = self.categorical_imputer.fit_transform(
                df_cleaned[categorical_cols])

        return df_cleaned

    def extract_features(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Feature extraction: standardize numeric columns and label-encode
        categorical ones.

        Every categorical column ``col`` gains a numeric ``col_encoded``
        companion column; the original column is kept. The input frame is
        left unmodified.

        Args:
            df: Input DataFrame.

        Returns:
            A transformed copy of the DataFrame.
        """
        # Work on a copy so the caller's frame is left untouched (the
        # previous implementation mutated `df` in place as a side effect).
        df = df.copy()

        # Z-score standardization of all numeric columns.
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) > 0:
            df[numeric_cols] = self.scaler.fit_transform(df[numeric_cols])

        # A fresh LabelEncoder per column: encodings are independent.
        categorical_cols = df.select_dtypes(include=['object']).columns
        for col in categorical_cols:
            df[f"{col}_encoded"] = LabelEncoder().fit_transform(df[col])

        return df

    def generate_statistics(self, df: pd.DataFrame) -> None:
        """
        Write a statistics report for ``df`` into ``output_dir``.

        Produces ``statistics.csv`` (``DataFrame.describe`` output) and,
        when numeric columns exist, ``correlation_heatmap.png``.

        Args:
            df: Input DataFrame.
        """
        # Basic descriptive statistics.
        stats = df.describe()
        stats.to_csv(self.output_dir / "statistics.csv")

        # Correlation analysis over the numeric columns only.
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) > 0:
            corr = df[numeric_cols].corr()
            plt.figure(figsize=(10, 8))
            sns.heatmap(corr, annot=True, cmap='coolwarm')
            plt.title("特征相关性热力图")
            plt.tight_layout()
            plt.savefig(self.output_dir / "correlation_heatmap.png")
            # Close the figure so repeated runs don't leak matplotlib state.
            plt.close()

    def process_data(self) -> None:
        """
        Run the full pipeline: load -> clean -> extract -> report -> save.

        Writes ``processed_data.csv`` to ``output_dir`` on success.

        Raises:
            Exception: Any error from a pipeline stage is logged and
                re-raised unchanged.
        """
        try:
            df = self.load_data()
            logger.info(f"成功加载数据，形状: {df.shape}")

            df_cleaned = self.clean_data(df)
            logger.info(f"数据清洗完成，形状: {df_cleaned.shape}")

            df_processed = self.extract_features(df_cleaned)
            logger.info(f"特征提取完成，形状: {df_processed.shape}")

            self.generate_statistics(df_processed)
            logger.info("统计报告生成完成")

            df_processed.to_csv(
                self.output_dir / "processed_data.csv", index=False)
            logger.info("处理后的数据已保存")

        except Exception as e:
            logger.error(f"数据处理过程中发生错误: {str(e)}")
            raise


def main():
    """Example usage: process CSVs from data/raw into data/processed."""
    raw_dir = "data/raw"
    processed_dir = "data/processed"
    processor = DataProcessor(input_dir=raw_dir, output_dir=processed_dir)
    processor.process_data()


if __name__ == "__main__":
    main()
