"""
数据处理模块，负责数据的加载、转换和清洗
"""
import pandas as pd
import numpy as np
from typing import Dict, List, Any, Optional, Union
import logging

from data.loaders.base_loader import BaseLoader
from data.processors.cleaner import DataCleaner
from data.processors.transformer import DataTransformer
from models.scene import Scene

logger = logging.getLogger(__name__)


class DataProcessor:
    """
    Data processor responsible for loading, cleaning, transforming,
    merging, filtering and aggregating tabular data sources.
    """

    def __init__(self):
        """Initialize the processor and its cleaning/transforming helpers."""
        self.cleaner = DataCleaner()
        self.transformer = DataTransformer()
        # Cache of already-loaded DataFrames, keyed by data source id.
        self.data_cache = {}

    def load_data(self, scene: "Scene", loaders: Dict[str, "BaseLoader"]) -> Dict[str, pd.DataFrame]:
        """
        Load every data source required by a scene.

        Args:
            scene: Scene object listing the data sources to load.
            loaders: Mapping of data source id -> loader instance.

        Returns:
            Mapping of data source id -> loaded DataFrame. A source whose
            loader raised maps to an empty DataFrame; a source without a
            loader is omitted entirely.
        """
        data = {}

        for data_source in scene.data_sources:
            source_id = data_source["id"]

            # A missing loader means the source is skipped (no entry at
            # all), unlike a load failure below which yields an empty frame.
            if source_id not in loaders:
                logger.warning(f"数据源 {source_id} 没有对应的加载器，跳过")
                continue

            try:
                df = loaders[source_id].load()
                data[source_id] = df
                # Keep a reference for potential reuse by later calls.
                self.data_cache[source_id] = df
                logger.info(f"成功加载数据源 {source_id}，共 {len(df)} 行")
            except Exception as e:
                logger.error(f"加载数据源 {source_id} 失败: {str(e)}", exc_info=True)
                # Best effort: an empty frame keeps downstream steps running.
                data[source_id] = pd.DataFrame()

        return data

    def clean_data(self, data: Dict[str, pd.DataFrame], cleaning_rules: Dict[str, List[Dict[str, Any]]]) -> Dict[str, pd.DataFrame]:
        """
        Clean each data source with its configured rules.

        Args:
            data: Raw data keyed by source id.
            cleaning_rules: Cleaning rule lists keyed by source id.

        Returns:
            Cleaned data keyed by source id; sources without rules (or
            whose cleaning failed) pass through unchanged.
        """
        return self._apply_rules(data, cleaning_rules, self.cleaner.clean, "清洗")

    def transform_data(self, data: Dict[str, pd.DataFrame], transformation_rules: Dict[str, List[Dict[str, Any]]]) -> Dict[str, pd.DataFrame]:
        """
        Transform each data source with its configured rules.

        Args:
            data: (Cleaned) data keyed by source id.
            transformation_rules: Transformation rule lists keyed by source id.

        Returns:
            Transformed data keyed by source id; sources without rules (or
            whose transformation failed) pass through unchanged.
        """
        return self._apply_rules(data, transformation_rules, self.transformer.transform, "转换")

    def _apply_rules(
        self,
        data: Dict[str, pd.DataFrame],
        rules_map: Dict[str, List[Dict[str, Any]]],
        apply_func,
        action: str,
    ) -> Dict[str, pd.DataFrame]:
        """
        Shared driver for clean_data/transform_data.

        Applies ``apply_func(df, rules)`` per source, falling back to the
        original frame when no rules are configured or the call fails.

        Args:
            data: Data keyed by source id.
            rules_map: Rule lists keyed by source id.
            apply_func: Callable ``(df, rules) -> df``.
            action: Verb used in log messages ("清洗" or "转换").
        """
        result = {}

        for source_id, df in data.items():
            # No rules configured for this source: pass it through untouched.
            if source_id not in rules_map:
                result[source_id] = df
                continue

            try:
                result[source_id] = apply_func(df, rules_map[source_id])
                logger.info(f"成功{action}数据源 {source_id}")
            except Exception as e:
                logger.error(f"{action}数据源 {source_id} 失败: {str(e)}", exc_info=True)
                # Fall back to the unprocessed frame on failure.
                result[source_id] = df

        return result

    def preview_data(self, data: Union[pd.DataFrame, Dict[str, pd.DataFrame]],
                    max_rows: int = 10) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
        """
        Return the first ``max_rows`` rows of a DataFrame, or of each
        frame in a mapping of source id -> DataFrame.
        """
        if isinstance(data, pd.DataFrame):
            return data.head(max_rows)
        return {source_id: df.head(max_rows) for source_id, df in data.items()}

    def get_data_summary(self, data: Union[pd.DataFrame, Dict[str, pd.DataFrame]]) -> Dict[str, Any]:
        """
        Summarize a DataFrame, or each frame in a mapping.

        Returns:
            For a single frame, one summary dict; for a mapping, a dict of
            summaries keyed by source id.
        """
        if isinstance(data, pd.DataFrame):
            return self._get_single_df_summary(data)
        return {source_id: self._get_single_df_summary(df) for source_id, df in data.items()}

    @staticmethod
    def _scalar_to_py(value: Any) -> Any:
        """Convert a numpy/pandas scalar to a plain Python value; NaN/NaT -> None."""
        if pd.isna(value):
            return None
        return value.item() if isinstance(value, np.generic) else value

    def _get_single_df_summary(self, df: pd.DataFrame) -> Dict[str, Any]:
        """
        Build the summary for one DataFrame.

        Includes row/column counts plus per-column dtype, missing-value
        statistics and type-specific statistics. All values are converted
        to plain Python scalars so the summary is JSON-serializable as-is.
        """
        summary: Dict[str, Any] = {
            "row_count": len(df),
            "column_count": len(df.columns),
            "columns": []
        }

        for col in df.columns:
            series = df[col]
            missing = int(series.isna().sum())
            col_info: Dict[str, Any] = {
                "name": col,
                "dtype": str(series.dtype),
                "missing_count": missing,
                # 0.0 (not NaN) for an empty column.
                "missing_percentage": round(missing / len(series) * 100, 2) if len(series) else 0.0
            }

            # Numeric columns: descriptive statistics. None when the column
            # is empty; NaN results (e.g. std of a single value) also
            # become None via _scalar_to_py.
            if pd.api.types.is_numeric_dtype(series):
                empty = series.empty
                col_info.update({
                    "min": None if empty else self._scalar_to_py(series.min()),
                    "max": None if empty else self._scalar_to_py(series.max()),
                    "mean": None if empty else self._scalar_to_py(series.mean()),
                    "median": None if empty else self._scalar_to_py(series.median()),
                    "std": None if empty else self._scalar_to_py(series.std())
                })

            # String/categorical columns. `is_categorical_dtype` is
            # deprecated since pandas 2.0; check the dtype instance instead.
            elif pd.api.types.is_string_dtype(series) or isinstance(series.dtype, pd.CategoricalDtype):
                top_values = series.value_counts().head(5)
                col_info.update({
                    "unique_count": int(series.nunique()),
                    "top_values": {key: int(count) for key, count in top_values.items()}
                })

            # Datetime columns; the `any` variant also covers tz-aware dtypes.
            elif pd.api.types.is_datetime64_any_dtype(series):
                min_date = series.min() if not series.empty else None
                max_date = series.max() if not series.empty else None
                col_info.update({
                    # pd.isna guards against NaT (all-missing column),
                    # whose strftime would raise.
                    "min_date": None if min_date is None or pd.isna(min_date) else min_date.strftime("%Y-%m-%d %H:%M:%S"),
                    "max_date": None if max_date is None or pd.isna(max_date) else max_date.strftime("%Y-%m-%d %H:%M:%S")
                })

            summary["columns"].append(col_info)

        return summary

    def merge_data(self, data: Dict[str, pd.DataFrame], merge_config: Dict[str, Any]) -> pd.DataFrame:
        """
        Merge multiple data sources into a single DataFrame.

        Args:
            data: Mapping of source id -> DataFrame.
            merge_config: Dict with "main_source" (id of the base table) and
                "joins" (list of {"source", "type", "left_on", "right_on"}).

        Returns:
            The merged DataFrame. Falls back to the first source (or an
            empty frame) when no config is given or fewer than two sources
            exist.

        Raises:
            ValueError: If the configured main source is missing from data.
        """
        if not merge_config or len(data) < 2:
            # Nothing to merge: return the first source if any.
            return next(iter(data.values())) if data else pd.DataFrame()

        main_source = merge_config.get("main_source")
        if main_source not in data:
            raise ValueError(f"主数据源 {main_source} 不存在")

        result_df = data[main_source].copy()

        for join_config in merge_config.get("joins", []):
            join_source = join_config.get("source")
            if join_source not in data:
                logger.warning(f"关联数据源 {join_source} 不存在，跳过")
                continue

            left_on = join_config.get("left_on")
            right_on = join_config.get("right_on")
            if not left_on or not right_on:
                logger.warning("关联配置缺少必要的字段映射，跳过")
                continue

            try:
                result_df = pd.merge(
                    result_df,
                    data[join_source],
                    how=join_config.get("type", "inner"),
                    left_on=left_on,
                    right_on=right_on,
                    # Suffix collided columns from the joined table with its id.
                    suffixes=('', f'_{join_source}')
                )
                logger.info(f"成功关联数据源 {join_source}")
            except Exception as e:
                logger.error(f"关联数据源 {join_source} 失败: {str(e)}", exc_info=True)

        return result_df

    def filter_data(self, df: pd.DataFrame, filter_conditions: List[Dict[str, Any]]) -> pd.DataFrame:
        """
        Filter rows by a list of conditions (combined with AND semantics).

        Args:
            df: Input DataFrame (not modified).
            filter_conditions: List of dicts with keys "column", "operator"
                and (except for the null checks) "value".

        Returns:
            A new DataFrame containing only the rows that match every
            applicable condition. Malformed conditions, unknown columns and
            unknown operators are skipped with a warning.
        """
        if not filter_conditions:
            return df

        result_df = df.copy()

        for condition in filter_conditions:
            column = condition.get("column")
            operator = condition.get("operator")
            value = condition.get("value")

            # Ignore malformed conditions or unknown columns.
            if not column or not operator or column not in df.columns:
                continue

            try:
                if operator == "equals":
                    mask = result_df[column] == value
                elif operator == "not_equals":
                    mask = result_df[column] != value
                elif operator == "greater_than":
                    mask = result_df[column] > value
                elif operator == "less_than":
                    mask = result_df[column] < value
                elif operator == "greater_equals":
                    mask = result_df[column] >= value
                elif operator == "less_equals":
                    mask = result_df[column] <= value
                elif operator == "contains":
                    mask = result_df[column].astype(str).str.contains(str(value), na=False)
                elif operator == "starts_with":
                    mask = result_df[column].astype(str).str.startswith(str(value), na=False)
                elif operator == "ends_with":
                    mask = result_df[column].astype(str).str.endswith(str(value), na=False)
                elif operator == "is_null":
                    mask = result_df[column].isna()
                elif operator == "is_not_null":
                    mask = result_df[column].notna()
                else:
                    # Previously an unknown operator was silently ignored yet
                    # still logged as applied; warn and skip instead.
                    logger.warning(f"未知的筛选操作符: {operator}，跳过")
                    continue

                result_df = result_df[mask]
                logger.info(f"应用筛选条件: {column} {operator} {value}")
            except Exception as e:
                logger.error(f"应用筛选条件失败: {str(e)}", exc_info=True)

        return result_df

    def aggregate_data(self, df: pd.DataFrame, aggregation_config: Dict[str, Any]) -> pd.DataFrame:
        """
        Group and aggregate a DataFrame.

        Args:
            df: Input DataFrame.
            aggregation_config: Dict with "group_by" (list of column names)
                and "aggregations" (list of {"column", "function"}).

        Returns:
            The aggregated DataFrame with flattened "column_function" names.
            Returns df unchanged when the config is empty, a group-by column
            is missing, or aggregation fails; returns group sizes (a "count"
            column) when no valid aggregation functions are configured.
        """
        if not aggregation_config:
            return df

        group_by = aggregation_config.get("group_by", [])
        if not group_by:
            return df

        # Bail out if any group-by column is missing.
        for col in group_by:
            if col not in df.columns:
                logger.warning(f"分组字段 {col} 不存在，跳过聚合")
                return df

        # Build column -> [functions] mapping, skipping invalid entries.
        agg_functions: Dict[str, List[str]] = {}
        for agg in aggregation_config.get("aggregations", []):
            column = agg.get("column")
            function = agg.get("function")

            if not column or not function or column not in df.columns:
                continue

            funcs = agg_functions.setdefault(column, [])
            # De-duplicate: pandas rejects repeated functions for one column.
            if function not in funcs:
                funcs.append(function)

        if not agg_functions:
            # No valid aggregation functions: fall back to group sizes.
            return df.groupby(group_by).size().reset_index(name="count")

        try:
            result_df = df.groupby(group_by).agg(agg_functions)

            # Flatten the (column, function) MultiIndex into "column_function".
            result_df.columns = ['_'.join(col).strip() for col in result_df.columns.values]
            result_df = result_df.reset_index()

            logger.info(f"成功聚合数据，分组字段: {group_by}")
            return result_df
        except Exception as e:
            logger.error(f"聚合数据失败: {str(e)}", exc_info=True)
            return df