"""
数据处理器
用于智能处理大量数据，避免直接传递大量数据给大模型
"""

import json
import pandas as pd
import numpy as np
from typing import List, Dict, Any, Tuple
from loguru import logger
import random


class DataProcessor:
    """Adaptive pre-processor that condenses large datasets before they are
    handed to an LLM, so prompts never carry an excessive number of raw rows.

    Depending on row count the data is passed through directly, sampled,
    aggregated with group-bys, or reduced to a statistical summary.
    """

    # Row-count thresholds that select the processing strategy.
    MAX_ROWS_DIRECT = 200        # up to this many rows: pass through as-is
    MAX_ROWS_SAMPLE = 1000       # up to this many rows: representative sampling
    MAX_ROWS_AGGREGATE = 10000   # up to this many rows: group-by aggregation
    SAMPLE_SIZE = 50             # rows kept by the sampling strategy

    @classmethod
    def process_task_data(cls, task_data: List[Dict[str, Any]], task_description: str = "") -> Dict[str, Any]:
        """Process task data with a strategy chosen from its size.

        Args:
            task_data: list of row dicts to condense.
            task_description: free-text task hint forwarded to the strategy
                helpers.

        Returns:
            Dict with the processed rows plus metadata: ``processed_data``,
            ``data_summary``, ``processing_strategy``, ``original_count`` and
            ``processed_count`` (individual strategies may add extra keys).
        """
        if not task_data:
            return {
                "processed_data": [],
                "data_summary": "数据为空",
                "processing_strategy": "none",
                "original_count": 0,
                "processed_count": 0
            }

        original_count = len(task_data)
        logger.info(f"开始处理数据，原始数据量: {original_count} 条")

        # Pick the lightest strategy that still fits the row count.
        if original_count <= cls.MAX_ROWS_DIRECT:
            # Strategy 1: small data, pass through directly.
            return cls._direct_process(task_data, task_description)
        elif original_count <= cls.MAX_ROWS_SAMPLE:
            # Strategy 2: medium data, representative sampling.
            return cls._sample_process(task_data, task_description)
        elif original_count <= cls.MAX_ROWS_AGGREGATE:
            # Strategy 3: large data, group-by aggregation.
            return cls._aggregate_process(task_data, task_description)
        else:
            # Strategy 4: very large data, statistical summary.
            return cls._summary_process(task_data, task_description)

    @classmethod
    def _direct_process(cls, task_data: List[Dict[str, Any]], task_description: str) -> Dict[str, Any]:
        """Strategy 1: forward small datasets unchanged."""

        logger.info("使用直接处理策略")

        return {
            "processed_data": task_data,
            "data_summary": f"数据量较小({len(task_data)}条)，直接传递完整数据进行分析",
            "processing_strategy": "direct",
            "original_count": len(task_data),
            "processed_count": len(task_data),
            # Slicing already caps at the list length; no length check needed.
            "data_preview": task_data[:5]
        }

    @classmethod
    def _sample_process(cls, task_data: List[Dict[str, Any]], task_description: str) -> Dict[str, Any]:
        """Strategy 2: sample a representative subset of a medium dataset."""

        logger.info("使用采样处理策略")

        # Representative sampling (stratified / categorical / random).
        sample_data = cls._intelligent_sampling(task_data, cls.SAMPLE_SIZE)

        # Column-level overview of the FULL dataset, not just the sample.
        data_overview = cls._generate_data_overview(task_data)

        return {
            "processed_data": sample_data,
            "data_summary": f"原始数据{len(task_data)}条，采样{len(sample_data)}条代表性数据进行分析",
            "processing_strategy": "sample",
            "original_count": len(task_data),
            "processed_count": len(sample_data),
            "data_overview": data_overview,
            "sampling_method": "intelligent"
        }

    @classmethod
    def _aggregate_process(cls, task_data: List[Dict[str, Any]], task_description: str) -> Dict[str, Any]:
        """Strategy 3: reduce a large dataset to group-by aggregates."""

        logger.info("使用聚合处理策略")

        try:
            # Aggregation is done via pandas on a temporary DataFrame.
            df = pd.DataFrame(task_data)

            aggregated_data = cls._intelligent_aggregation(df, task_description)

            statistical_summary = cls._generate_statistical_summary(df)

            return {
                "processed_data": aggregated_data,
                "data_summary": f"原始数据{len(task_data)}条，聚合为{len(aggregated_data)}条汇总数据",
                "processing_strategy": "aggregate",
                "original_count": len(task_data),
                "processed_count": len(aggregated_data),
                "statistical_summary": statistical_summary,
                "aggregation_method": "intelligent"
            }

        except Exception as e:
            logger.error(f"聚合处理失败: {str(e)}")
            # Degrade gracefully to the sampling strategy.
            return cls._sample_process(task_data, task_description)

    @classmethod
    def _summary_process(cls, task_data: List[Dict[str, Any]], task_description: str) -> Dict[str, Any]:
        """Strategy 4: statistical summary for very large datasets."""

        logger.info("使用统计摘要处理策略")

        try:
            df = pd.DataFrame(task_data)

            # Detailed statistics: distributions, skew/kurtosis, etc.
            detailed_summary = cls._generate_detailed_summary(df)

            # Plain-language findings derived from the statistics.
            key_insights = cls._extract_key_insights(df, task_description)

            # A handful of representative rows so the model sees real examples.
            representative_samples = cls._intelligent_sampling(task_data, 20)

            return {
                "processed_data": representative_samples,
                "data_summary": f"超大数据量({len(task_data)}条)，生成统计摘要和关键洞察",
                "processing_strategy": "summary",
                "original_count": len(task_data),
                "processed_count": len(representative_samples),
                "detailed_summary": detailed_summary,
                "key_insights": key_insights,
                "representative_samples": representative_samples
            }

        except Exception as e:
            logger.error(f"统计摘要处理失败: {str(e)}")
            # Degrade gracefully to simple sampling.
            return cls._sample_process(task_data, task_description)

    @classmethod
    def _intelligent_sampling(cls, data: List[Dict[str, Any]], sample_size: int) -> List[Dict[str, Any]]:
        """Pick up to ``sample_size`` representative rows.

        Preference order: stratified sampling over the first numeric column,
        per-category sampling over the first string column, plain random
        sampling. Any pandas failure falls back to random sampling.
        """

        if len(data) <= sample_size:
            return data

        try:
            df = pd.DataFrame(data)

            # Preference 1: stratify by the first numeric column.
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            if len(numeric_columns) > 0:
                main_numeric_col = numeric_columns[0]
                return cls._stratified_sampling(df, main_numeric_col, sample_size)

            # Preference 2: sample per category of the first string column.
            categorical_columns = df.select_dtypes(include=['object']).columns
            if len(categorical_columns) > 0:
                main_cat_col = categorical_columns[0]
                return cls._categorical_sampling(df, main_cat_col, sample_size)

            # Preference 3: uniform random sampling.
            return random.sample(data, sample_size)

        except Exception as e:
            logger.warning(f"智能采样失败，使用随机采样: {str(e)}")
            return random.sample(data, sample_size)

    @classmethod
    def _stratified_sampling(cls, df: pd.DataFrame, column: str, sample_size: int) -> List[Dict[str, Any]]:
        """Stratified sampling over a numeric column.

        Rows are bucketed into (up to) four quantile bins and an equal share
        is drawn from each bin; any shortfall is topped up from rows not yet
        selected, so the result never contains duplicate rows.
        """

        try:
            # Keep the bin labels in a separate Series so the caller's frame
            # is not mutated (the original appended a throwaway column).
            quartile_labels = pd.qcut(df[column], q=4, duplicates='drop')

            chosen_index: List[Any] = []
            bins = quartile_labels.unique()
            samples_per_bin = max(1, sample_size // len(bins))

            for bin_label in bins:
                bin_rows = df[quartile_labels == bin_label]
                n_samples = min(samples_per_bin, len(bin_rows))
                chosen_index.extend(bin_rows.sample(n=n_samples).index)

            # Top up only from rows that were NOT already selected; the
            # original sampled the whole frame here and could duplicate rows.
            if len(chosen_index) < sample_size:
                pool = df.drop(index=chosen_index)
                extra = min(sample_size - len(chosen_index), len(pool))
                if extra > 0:
                    chosen_index.extend(pool.sample(n=extra).index)

            return df.loc[chosen_index].to_dict('records')[:sample_size]

        except Exception as e:
            logger.warning(f"分层采样失败: {str(e)}")
            return df.sample(n=sample_size).to_dict('records')

    @classmethod
    def _categorical_sampling(cls, df: pd.DataFrame, column: str, sample_size: int) -> List[Dict[str, Any]]:
        """Sample an (approximately) equal share of rows from each category."""

        try:
            categories = df[column].unique()
            samples_per_category = max(1, sample_size // len(categories))

            samples = []
            for category in categories:
                category_data = df[df[column] == category]
                n_samples = min(samples_per_category, len(category_data))
                samples.extend(category_data.sample(n=n_samples).to_dict('records'))

            return samples[:sample_size]

        except Exception as e:
            logger.warning(f"分类采样失败: {str(e)}")
            return df.sample(n=sample_size).to_dict('records')

    @classmethod
    def _intelligent_aggregation(cls, df: pd.DataFrame, task_description: str) -> List[Dict[str, Any]]:
        """Aggregate numeric columns grouped by the first categorical column.

        Returns at most 50 aggregate rows. When no categorical/numeric pair
        exists (or aggregation fails) falls back to per-column statistics so
        the caller never receives an empty result for a non-empty frame.
        """

        try:
            aggregated_results = []

            categorical_columns = df.select_dtypes(include=['object']).columns
            numeric_columns = df.select_dtypes(include=[np.number]).columns

            if len(categorical_columns) > 0 and len(numeric_columns) > 0:
                # Group by the first categorical column.
                group_col = categorical_columns[0]

                for numeric_col in numeric_columns[:3]:  # at most 3 numeric columns
                    agg_data = df.groupby(group_col)[numeric_col].agg(['count', 'mean', 'sum', 'min', 'max']).reset_index()

                    for _, row in agg_data.iterrows():
                        aggregated_results.append({
                            '分组字段': group_col,
                            '分组值': row[group_col],
                            '指标字段': numeric_col,
                            '记录数': int(row['count']),
                            '平均值': round(float(row['mean']), 2),
                            '总和': round(float(row['sum']), 2),
                            '最小值': round(float(row['min']), 2),
                            '最大值': round(float(row['max']), 2)
                        })

            # No usable categorical/numeric pair: the original silently
            # returned [] and dropped all data — use simple statistics instead.
            if not aggregated_results:
                return cls._simple_statistics(df)

            # Cap the payload at 50 aggregate rows.
            return aggregated_results[:50]

        except Exception as e:
            logger.warning(f"智能聚合失败: {str(e)}")
            return cls._simple_statistics(df)

    @classmethod
    def _simple_statistics(cls, df: pd.DataFrame) -> List[Dict[str, Any]]:
        """Per-column descriptive statistics (numeric and categorical)."""

        stats = []

        # Numeric columns: central tendency and spread.
        numeric_columns = df.select_dtypes(include=[np.number]).columns
        for col in numeric_columns:
            stats.append({
                '字段名': col,
                '数据类型': '数值',
                '记录数': int(df[col].count()),
                '平均值': round(float(df[col].mean()), 2),
                '中位数': round(float(df[col].median()), 2),
                '标准差': round(float(df[col].std()), 2),
                '最小值': round(float(df[col].min()), 2),
                '最大值': round(float(df[col].max()), 2)
            })

        # Categorical columns: cardinality and top values.
        categorical_columns = df.select_dtypes(include=['object']).columns
        for col in categorical_columns:
            value_counts = df[col].value_counts().head(5)
            stats.append({
                '字段名': col,
                '数据类型': '分类',
                '记录数': int(df[col].count()),
                '唯一值数量': int(df[col].nunique()),
                '前5个值': value_counts.to_dict()
            })

        return stats

    @classmethod
    def _generate_data_overview(cls, data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Column-level overview: counts, column lists, missing values."""

        if not data:
            return {}

        try:
            df = pd.DataFrame(data)

            return {
                '总记录数': len(df),
                '字段数量': len(df.columns),
                '字段列表': list(df.columns),
                '数值字段': list(df.select_dtypes(include=[np.number]).columns),
                '分类字段': list(df.select_dtypes(include=['object']).columns),
                '缺失值情况': df.isnull().sum().to_dict()
            }

        except Exception as e:
            logger.warning(f"生成数据概览失败: {str(e)}")
            return {'总记录数': len(data)}

    @classmethod
    def _generate_statistical_summary(cls, df: pd.DataFrame) -> Dict[str, Any]:
        """Statistical summary: basic info plus per-column statistics."""

        try:
            summary = {
                '基本信息': {
                    '总记录数': len(df),
                    '字段数量': len(df.columns),
                    '内存使用': f"{df.memory_usage(deep=True).sum() / 1024 / 1024:.2f} MB"
                },
                '数值字段统计': {},
                '分类字段统计': {}
            }

            # Numeric columns.
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            for col in numeric_columns:
                summary['数值字段统计'][col] = {
                    '平均值': round(float(df[col].mean()), 2),
                    '中位数': round(float(df[col].median()), 2),
                    '标准差': round(float(df[col].std()), 2),
                    '最小值': round(float(df[col].min()), 2),
                    '最大值': round(float(df[col].max()), 2),
                    '缺失值': int(df[col].isnull().sum())
                }

            # Categorical columns.
            categorical_columns = df.select_dtypes(include=['object']).columns
            for col in categorical_columns:
                top_values = df[col].value_counts().head(3)
                summary['分类字段统计'][col] = {
                    '唯一值数量': int(df[col].nunique()),
                    '最频繁的值': top_values.to_dict(),
                    '缺失值': int(df[col].isnull().sum())
                }

            return summary

        except Exception as e:
            logger.warning(f"生成统计摘要失败: {str(e)}")
            return {'基本信息': {'总记录数': len(df)}}

    @classmethod
    def _generate_detailed_summary(cls, df: pd.DataFrame) -> Dict[str, Any]:
        """Extend the statistical summary with distribution details."""

        try:
            # Start from the basic summary and layer detail onto it.
            basic_summary = cls._generate_statistical_summary(df)

            detailed_info = {
                '数据分布': {},
                '相关性分析': {},
                '异常值检测': {}
            }

            # Distribution details for at most 5 numeric columns.
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            for col in numeric_columns[:5]:
                try:
                    detailed_info['数据分布'][col] = {
                        '分位数': {
                            '25%': round(float(df[col].quantile(0.25)), 2),
                            '50%': round(float(df[col].quantile(0.5)), 2),
                            '75%': round(float(df[col].quantile(0.75)), 2)
                        },
                        '偏度': round(float(df[col].skew()), 2),
                        '峰度': round(float(df[col].kurtosis()), 2)
                    }
                except Exception:
                    # Skip columns whose stats cannot be computed; a bare
                    # except here would also swallow KeyboardInterrupt.
                    pass

            basic_summary.update(detailed_info)
            return basic_summary

        except Exception as e:
            logger.warning(f"生成详细摘要失败: {str(e)}")
            return cls._generate_statistical_summary(df)

    @classmethod
    def _extract_key_insights(cls, df: pd.DataFrame, task_description: str) -> List[str]:
        """Derive up to 10 plain-language findings from the DataFrame."""

        insights = []

        try:
            # Basic shape.
            insights.append(f"数据集包含 {len(df)} 条记录，{len(df.columns)} 个字段")

            # Skew detection on the first few numeric columns.
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            if len(numeric_columns) > 0:
                for col in numeric_columns[:3]:
                    mean_val = df[col].mean()
                    median_val = df[col].median()
                    # Guard against a zero median (the original division blew
                    # up / produced inf); abs() on the denominator keeps the
                    # skew ratio meaningful for negative medians too.
                    if median_val and abs(mean_val - median_val) / abs(median_val) > 0.2:
                        insights.append(f"{col} 字段存在明显偏态分布（均值: {mean_val:.2f}, 中位数: {median_val:.2f}）")

            # Concentration of categorical columns.
            categorical_columns = df.select_dtypes(include=['object']).columns
            if len(categorical_columns) > 0:
                for col in categorical_columns[:2]:
                    unique_count = df[col].nunique()
                    total_count = len(df)
                    if unique_count / total_count < 0.1:
                        insights.append(f"{col} 字段具有较高的集中度（{unique_count} 个唯一值）")

            # Columns with >10% missing values.
            missing_data = df.isnull().sum()
            high_missing_cols = missing_data[missing_data > len(df) * 0.1]
            if len(high_missing_cols) > 0:
                insights.append(f"以下字段存在较多缺失值: {list(high_missing_cols.index)}")

        except Exception as e:
            logger.warning(f"提取关键洞察失败: {str(e)}")
            insights.append("数据分析过程中遇到异常，建议检查数据质量")

        return insights[:10]  # at most 10 insights