"""
数据聚合优化器
提供预计算、实时聚合、并行计算等高性能数据聚合功能
"""

import asyncio
import time
import json
from typing import List, Dict, Any, Optional, Callable, Union, Tuple
from datetime import datetime, date, timedelta
from dataclasses import dataclass, field
from enum import Enum
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from sqlalchemy import select, func, and_, or_, text, case, literal_column
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import Select
from sqlalchemy.orm import selectinload

from .database import DatabaseManager
from .cache import CacheManager, CacheLevel, CacheStrategy
from .query_optimizer import QueryOptimizer, QueryType
from ..core.config import get_settings


class AggregationType(Enum):
    """SQL aggregate functions an AggregationConfig may request.

    NOTE(review): PERCENTILE, MEDIAN and CUSTOM are declared here but no
    SQL builder in this module generates expressions for them — confirm
    before using these members.
    """
    SUM = "SUM"
    AVG = "AVG"
    COUNT = "COUNT"
    MAX = "MAX"
    MIN = "MIN"
    STDDEV = "STDDEV"
    VARIANCE = "VARIANCE"
    PERCENTILE = "PERCENTILE"
    MEDIAN = "MEDIAN"
    CUSTOM = "CUSTOM"


class AggregationStrategy(Enum):
    """How an aggregation should be executed."""
    REAL_TIME = "REAL_TIME"           # compute on demand against the source table
    PRE_COMPUTED = "PRE_COMPUTED"     # serve from a materialized view
    HYBRID = "HYBRID"                 # choose per call from performance history
    PARALLEL = "PARALLEL"             # partition the data and compute concurrently
    INCREMENTAL = "INCREMENTAL"       # incremental computation (NOTE(review): no engine in this module implements it yet)


class TimeGranularity(Enum):
    """Time-bucket sizes used for DATE_TRUNC-based time grouping.

    NOTE(review): MINUTE has no mapping in ``_get_time_expression`` and
    falls back to the raw column — confirm whether that is intended.
    """
    MINUTE = "MINUTE"
    HOUR = "HOUR"
    DAY = "DAY"
    WEEK = "WEEK"
    MONTH = "MONTH"
    QUARTER = "QUARTER"
    YEAR = "YEAR"


@dataclass
class AggregationConfig:
    """Declarative description of one aggregation job.

    Consumed by the SQL builders (PreComputedAggregation /
    RealTimeAggregator / ParallelAggregator) and by AggregationOptimizer
    for caching and strategy selection.
    """
    name: str  # unique key used for registration, caching and view naming
    aggregation_type: AggregationType  # which SQL aggregate to apply
    strategy: AggregationStrategy  # preferred execution strategy
    source_table: str  # table the aggregation reads from
    source_columns: List[str]  # columns fed into the aggregate function
    group_by_columns: List[str] = field(default_factory=list)  # GROUP BY columns
    filter_conditions: Dict[str, Any] = field(default_factory=dict)  # static filters: scalar -> equality, list -> IN
    time_column: Optional[str] = None  # column used for time bucketing, if any
    time_granularity: Optional[TimeGranularity] = None  # DATE_TRUNC bucket size
    cache_ttl: int = 300  # result cache TTL in seconds (default: 5 minutes)
    refresh_interval: int = 3600  # materialized-view refresh interval in seconds (default: 1 hour)
    enable_incremental: bool = True  # NOTE(review): declared but never read in this module
    parallel_workers: int = 4  # NOTE(review): declared but ParallelAggregator uses its own max_workers


@dataclass
class AggregationResult:
    """Outcome of one aggregation computation (success, cache hit or failure)."""
    config_name: str  # name of the config that was computed
    result_data: Dict[str, Any]  # engine payload: 'data' rows plus metadata keys
    computation_time: float  # wall-clock seconds spent (including cache lookup)
    strategy_used: AggregationStrategy  # strategy that actually ran
    cache_hit: bool = False  # True when served from the cache
    rows_processed: int = 0  # number of result rows produced
    timestamp: datetime = field(default_factory=datetime.now)  # completion time
    error: Optional[str] = None  # error message, None on success


class PreComputedAggregation:
    """Manager for pre-computed aggregations backed by materialized views.

    Builds one PostgreSQL materialized view per ``AggregationConfig`` and
    serves later lookups from that view instead of re-aggregating the
    source table.
    """

    def __init__(self, db_manager: DatabaseManager, cache_manager: CacheManager):
        self.db_manager = db_manager
        self.cache_manager = cache_manager
        # Maps config name -> materialized view name (populated on creation).
        self.materialized_views: Dict[str, str] = {}
        # Reserved for refresh scheduling metadata; not populated yet.
        self.refresh_schedules: Dict[str, Any] = {}

    async def create_materialized_view(
        self,
        session: AsyncSession,
        config: AggregationConfig
    ) -> bool:
        """Create a materialized view (plus a group-by index) for *config*.

        Returns:
            True on success; False after rolling back on any error.
        """
        try:
            view_name = f"mv_{config.name}"

            # Aggregation body of the view.
            agg_sql = self._build_aggregation_sql(config)

            # NOTE: identifiers (view/table/column names) are interpolated
            # directly; they must come from trusted configuration, not users.
            create_mv_sql = f"""
                CREATE MATERIALIZED VIEW {view_name} AS
                {agg_sql}
                WITH DATA;
            """

            await session.execute(text(create_mv_sql))
            await session.commit()

            # Index the grouping columns to speed up later filtered reads.
            if config.group_by_columns:
                index_columns = ", ".join(config.group_by_columns)
                index_sql = f"""
                    CREATE INDEX idx_{view_name}_groups 
                    ON {view_name} ({index_columns});
                """
                await session.execute(text(index_sql))
                await session.commit()

            self.materialized_views[config.name] = view_name
            return True

        except Exception as e:
            await session.rollback()
            print(f"创建物化视图失败: {e}")
            return False

    @staticmethod
    def _sql_literal(value: Any) -> str:
        """Render a scalar as a SQL literal, quoting and escaping strings.

        Used only for the view DDL, where bound parameters are not an
        option; filter values are assumed to come from trusted config.
        """
        if isinstance(value, str):
            return "'" + value.replace("'", "''") + "'"
        return str(value)

    def _build_aggregation_sql(self, config: AggregationConfig) -> str:
        """Build the SELECT statement that defines the materialized view."""
        select_parts: List[str] = []

        # Grouping columns come first in the projection.
        for col in config.group_by_columns:
            select_parts.append(col)

        # One aggregate expression per source column.  PERCENTILE, MEDIAN
        # and CUSTOM have no mapping here and are skipped.
        for col in config.source_columns:
            if config.aggregation_type == AggregationType.SUM:
                select_parts.append(f"SUM({col}) as sum_{col}")
            elif config.aggregation_type == AggregationType.AVG:
                select_parts.append(f"AVG({col}) as avg_{col}")
            elif config.aggregation_type == AggregationType.COUNT:
                select_parts.append(f"COUNT({col}) as count_{col}")
            elif config.aggregation_type == AggregationType.MAX:
                select_parts.append(f"MAX({col}) as max_{col}")
            elif config.aggregation_type == AggregationType.MIN:
                select_parts.append(f"MIN({col}) as min_{col}")
            elif config.aggregation_type == AggregationType.STDDEV:
                # Added for consistency with the real-time SQL builder.
                select_parts.append(f"STDDEV({col}) as stddev_{col}")
            elif config.aggregation_type == AggregationType.VARIANCE:
                select_parts.append(f"VARIANCE({col}) as variance_{col}")

        # Optional time bucket (DATE_TRUNC) column.
        if config.time_column and config.time_granularity:
            time_expr = self._get_time_expression(config.time_column, config.time_granularity)
            select_parts.append(f"{time_expr} as time_period")

        select_clause = ", ".join(select_parts)
        from_clause = config.source_table

        # Static filters baked into the view definition.
        where_conditions = []
        for col, value in config.filter_conditions.items():
            if isinstance(value, list):
                rendered = ", ".join(self._sql_literal(v) for v in value)
                where_conditions.append(f"{col} IN ({rendered})")
            else:
                # BUG FIX: string values were previously interpolated without
                # quotes (e.g. "market = SH"), producing invalid SQL.
                where_conditions.append(f"{col} = {self._sql_literal(value)}")

        where_clause = " AND ".join(where_conditions) if where_conditions else "1=1"

        # GROUP BY must repeat the time expression, not its alias.
        group_by_parts = config.group_by_columns.copy()
        if config.time_column and config.time_granularity:
            time_expr = self._get_time_expression(config.time_column, config.time_granularity)
            group_by_parts.append(time_expr)

        group_by_clause = ", ".join(group_by_parts) if group_by_parts else ""

        sql = f"SELECT {select_clause} FROM {from_clause} WHERE {where_clause}"
        if group_by_clause:
            sql += f" GROUP BY {group_by_clause}"

        return sql

    def _get_time_expression(self, time_column: str, granularity: TimeGranularity) -> str:
        """Map a granularity to a PostgreSQL DATE_TRUNC expression.

        NOTE(review): MINUTE has no mapping and falls through to the raw
        column — confirm whether that is intended.
        """
        if granularity == TimeGranularity.DAY:
            return f"DATE_TRUNC('day', {time_column})"
        elif granularity == TimeGranularity.HOUR:
            return f"DATE_TRUNC('hour', {time_column})"
        elif granularity == TimeGranularity.WEEK:
            return f"DATE_TRUNC('week', {time_column})"
        elif granularity == TimeGranularity.MONTH:
            return f"DATE_TRUNC('month', {time_column})"
        elif granularity == TimeGranularity.QUARTER:
            return f"DATE_TRUNC('quarter', {time_column})"
        elif granularity == TimeGranularity.YEAR:
            return f"DATE_TRUNC('year', {time_column})"
        else:
            return time_column

    async def refresh_materialized_view(
        self,
        session: AsyncSession,
        config_name: str
    ) -> bool:
        """Re-materialize the view for *config_name*.

        Returns False when the config is unknown or the refresh fails.
        """
        if config_name not in self.materialized_views:
            return False

        try:
            view_name = self.materialized_views[config_name]
            refresh_sql = f"REFRESH MATERIALIZED VIEW {view_name};"
            await session.execute(text(refresh_sql))
            await session.commit()
            return True
        except Exception as e:
            await session.rollback()
            print(f"刷新物化视图失败: {e}")
            return False

    async def query_materialized_view(
        self,
        session: AsyncSession,
        config_name: str,
        filters: Optional[Dict[str, Any]] = None
    ) -> List[Dict[str, Any]]:
        """Read rows from the view, optionally filtered by equality / IN.

        SECURITY FIX: filter *values* are now passed as bound parameters
        instead of being interpolated into the SQL string.  Column names
        are still interpolated and must be trusted.
        """
        if config_name not in self.materialized_views:
            return []

        view_name = self.materialized_views[config_name]
        sql = f"SELECT * FROM {view_name}"
        params: Dict[str, Any] = {}

        if filters:
            where_conditions = []
            for idx, (col, value) in enumerate(filters.items()):
                if isinstance(value, list):
                    # Expand lists into one bound parameter per element.
                    names = []
                    for j, item in enumerate(value):
                        pname = f"p{idx}_{j}"
                        params[pname] = item
                        names.append(f":{pname}")
                    where_conditions.append(f"{col} IN ({', '.join(names)})")
                else:
                    pname = f"p{idx}"
                    params[pname] = value
                    where_conditions.append(f"{col} = :{pname}")

            if where_conditions:
                sql += " WHERE " + " AND ".join(where_conditions)

        result = await session.execute(text(sql), params)
        return [dict(row._mapping) for row in result.fetchall()]


class RealTimeAggregator:
    """Executes aggregations on demand directly against the source table."""

    def __init__(self, db_manager: DatabaseManager, query_optimizer: QueryOptimizer):
        self.db_manager = db_manager
        # May be None (ParallelAggregator constructs it that way);
        # compute_aggregation falls back to a plain execute in that case.
        self.query_optimizer = query_optimizer

    async def compute_aggregation(
        self,
        session: AsyncSession,
        config: AggregationConfig
    ) -> Dict[str, Any]:
        """Run the aggregation described by *config* and return its rows.

        Returns a dict with keys ``data``, ``computation_time`` and
        ``rows_processed``, plus ``query_metrics`` on success or ``error``
        on failure.
        """
        start_time = time.time()

        try:
            query = self._build_sqlalchemy_query(config)

            # BUG FIX: ParallelAggregator instantiates this class with
            # query_optimizer=None; previously that raised AttributeError
            # here and every partition computation returned an error.
            if self.query_optimizer is not None:
                result, metrics = await self.query_optimizer.execute_optimized_query(
                    session, query, QueryType.AGGREGATE
                )
            else:
                result = await session.execute(query)
                metrics = None

            rows = result.fetchall()
            processed_data = [dict(row._mapping) for row in rows]

            computation_time = time.time() - start_time

            return {
                'data': processed_data,
                'computation_time': computation_time,
                'rows_processed': len(processed_data),
                'query_metrics': metrics
            }

        except Exception as e:
            return {
                'data': [],
                'computation_time': time.time() - start_time,
                'rows_processed': 0,
                'error': str(e)
            }

    def _build_sqlalchemy_query(self, config: AggregationConfig) -> Select:
        """Wrap the raw aggregation SQL in a ``SELECT *`` subquery statement."""
        # Simplified implementation: model classes are not resolved
        # dynamically here, so the statement is assembled from raw SQL text.
        raw_sql = self._build_aggregation_sql_for_realtime(config)
        return select(literal_column("*")).select_from(text(f"({raw_sql}) as subquery"))

    @staticmethod
    def _render_value(value: Any) -> str:
        """Render a scalar as a SQL literal, quoting and escaping strings."""
        if isinstance(value, str):
            return "'" + value.replace("'", "''") + "'"
        return str(value)

    def _build_aggregation_sql_for_realtime(self, config: AggregationConfig) -> str:
        """Build the raw SQL for a real-time aggregation run."""
        select_parts = []

        # Grouping columns come first in the projection.
        for col in config.group_by_columns:
            select_parts.append(col)

        # One aggregate expression per source column; PERCENTILE / MEDIAN /
        # CUSTOM have no mapping here and are skipped.
        for col in config.source_columns:
            if config.aggregation_type == AggregationType.SUM:
                select_parts.append(f"SUM({col}) as sum_{col}")
            elif config.aggregation_type == AggregationType.AVG:
                select_parts.append(f"AVG({col}) as avg_{col}")
            elif config.aggregation_type == AggregationType.COUNT:
                select_parts.append(f"COUNT({col}) as count_{col}")
            elif config.aggregation_type == AggregationType.MAX:
                select_parts.append(f"MAX({col}) as max_{col}")
            elif config.aggregation_type == AggregationType.MIN:
                select_parts.append(f"MIN({col}) as min_{col}")
            elif config.aggregation_type == AggregationType.STDDEV:
                select_parts.append(f"STDDEV({col}) as stddev_{col}")
            elif config.aggregation_type == AggregationType.VARIANCE:
                select_parts.append(f"VARIANCE({col}) as variance_{col}")

        select_clause = ", ".join(select_parts)
        from_clause = config.source_table

        # "1=1" keeps the WHERE clause valid when no filters are configured.
        where_conditions = ["1=1"]
        for col, value in config.filter_conditions.items():
            if isinstance(value, list):
                value_list = ", ".join(self._render_value(v) for v in value)
                where_conditions.append(f"{col} IN ({value_list})")
            else:
                where_conditions.append(f"{col} = {self._render_value(value)}")

        where_clause = " AND ".join(where_conditions)

        group_by_clause = ", ".join(config.group_by_columns) if config.group_by_columns else ""

        sql = f"SELECT {select_clause} FROM {from_clause} WHERE {where_clause}"
        if group_by_clause:
            sql += f" GROUP BY {group_by_clause}"

        return sql


class ParallelAggregator:
    """Splits one aggregation into partitions and computes them concurrently."""

    def __init__(self, db_manager: DatabaseManager, max_workers: int = 4):
        self.db_manager = db_manager
        self.max_workers = max_workers
        # NOTE(review): this thread pool is never used or shut down by this
        # class — partition work runs on asyncio tasks instead.  Kept for
        # interface compatibility.
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

    async def compute_parallel_aggregation(
        self,
        config: AggregationConfig,
        partition_strategy: str = "time"
    ) -> Dict[str, Any]:
        """Partition the source data, aggregate each part, merge the results.

        Args:
            config: the aggregation to run.
            partition_strategy: "time" (daily slices of config.time_column)
                or "hash" (modulo buckets over the id column).
        """
        start_time = time.time()

        try:
            partitions = await self._generate_partitions(config, partition_strategy)

            # One asyncio task per partition.
            tasks = [
                asyncio.create_task(self._compute_partition_aggregation(config, partition))
                for partition in partitions
            ]

            # return_exceptions=True keeps one failed partition from
            # cancelling the rest; failures are skipped during the merge.
            partition_results = await asyncio.gather(*tasks, return_exceptions=True)

            merged_result = self._merge_partition_results(partition_results, config)

            computation_time = time.time() - start_time

            return {
                'data': merged_result,
                'computation_time': computation_time,
                'partitions_processed': len(partitions),
                'parallel_workers': len(tasks)
            }

        except Exception as e:
            return {
                'data': [],
                'computation_time': time.time() - start_time,
                'partitions_processed': 0,
                'error': str(e)
            }

    async def _generate_partitions(
        self,
        config: AggregationConfig,
        strategy: str
    ) -> List[Dict[str, Any]]:
        """Compute partition descriptors for the requested strategy.

        NOTE(review): the filters produced here ("time" emits a
        (start, end) tuple, "hash" emits a raw "mod_condition" string) are
        not understood by RealTimeAggregator's SQL builder, which only
        handles scalar equality and IN lists — verify before relying on
        parallel results.
        """
        partitions = []

        if strategy == "time" and config.time_column:
            async with self.db_manager.get_session() as session:
                # Determine the overall time span of the source table.
                min_max_sql = f"""
                    SELECT MIN({config.time_column}) as min_time, 
                           MAX({config.time_column}) as max_time
                    FROM {config.source_table}
                """
                result = await session.execute(text(min_max_sql))
                row = result.fetchone()

                if row and row.min_time and row.max_time:
                    # One partition per day across the observed range.
                    current_time = row.min_time
                    while current_time <= row.max_time:
                        next_time = current_time + timedelta(days=1)
                        partitions.append({
                            'start_time': current_time,
                            'end_time': next_time,
                            'filter': {config.time_column: (current_time, next_time)}
                        })
                        current_time = next_time

        elif strategy == "hash":
            # One modulo bucket per worker, keyed on the id column.
            for i in range(self.max_workers):
                partitions.append({
                    'partition_id': i,
                    'filter': {'mod_condition': f"id % {self.max_workers} = {i}"}
                })

        return partitions

    async def _compute_partition_aggregation(
        self,
        config: AggregationConfig,
        partition: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Aggregate a single partition via a real-time computation."""
        try:
            async with self.db_manager.get_session() as session:
                # Clone the config with the partition filter merged in, so the
                # registered config itself is never mutated.
                partition_config = AggregationConfig(
                    name=f"{config.name}_partition",
                    aggregation_type=config.aggregation_type,
                    strategy=AggregationStrategy.REAL_TIME,
                    source_table=config.source_table,
                    source_columns=config.source_columns,
                    group_by_columns=config.group_by_columns,
                    filter_conditions={**config.filter_conditions, **partition.get('filter', {})}
                )

                # NOTE(review): constructed without a query optimizer —
                # RealTimeAggregator must tolerate query_optimizer=None.
                real_time_aggregator = RealTimeAggregator(self.db_manager, None)
                return await real_time_aggregator.compute_aggregation(session, partition_config)

        except Exception as e:
            return {'data': [], 'error': str(e)}

    def _merge_partition_results(
        self,
        partition_results: List[Dict[str, Any]],
        config: AggregationConfig
    ) -> List[Dict[str, Any]]:
        """Combine per-partition rows into global aggregates.

        Only SUM, COUNT, MAX and MIN are mergeable from partials; AVG,
        STDDEV and VARIANCE cannot be reconstructed without per-partition
        counts and are therefore not merged here.
        """
        merged_data = defaultdict(lambda: defaultdict(float))

        for result in partition_results:
            # gather(return_exceptions=True) may hand us raised exceptions.
            if isinstance(result, Exception):
                continue

            for row in result.get('data', []):
                # Group identity across partitions.
                group_key = tuple(row.get(col, '') for col in config.group_by_columns)

                for col in config.source_columns:
                    if config.aggregation_type == AggregationType.SUM:
                        value = row.get(f'sum_{col}')
                        if value is not None:
                            merged_data[group_key][f'sum_{col}'] += value
                    elif config.aggregation_type == AggregationType.COUNT:
                        value = row.get(f'count_{col}')
                        if value is not None:
                            merged_data[group_key][f'count_{col}'] += value
                    elif config.aggregation_type == AggregationType.MAX:
                        # BUG FIX: a missing value used to default to 0,
                        # corrupting MAX for all-negative data (and MIN for
                        # all-positive data).  Absent values are now skipped.
                        value = row.get(f'max_{col}')
                        if value is not None:
                            current_max = merged_data[group_key].get(f'max_{col}', float('-inf'))
                            merged_data[group_key][f'max_{col}'] = max(current_max, value)
                    elif config.aggregation_type == AggregationType.MIN:
                        value = row.get(f'min_{col}')
                        if value is not None:
                            current_min = merged_data[group_key].get(f'min_{col}', float('inf'))
                            merged_data[group_key][f'min_{col}'] = min(current_min, value)

        # Flatten {group_key: {agg_name: value}} into a list of row dicts.
        result_list = []
        for group_key, aggregated_values in merged_data.items():
            row_data = {}
            for i, col in enumerate(config.group_by_columns):
                row_data[col] = group_key[i] if i < len(group_key) else None
            row_data.update(aggregated_values)
            result_list.append(row_data)

        return result_list


class AggregationOptimizer:
    """Facade that routes aggregation requests to the best execution engine.

    Owns the pre-computed, real-time and parallel engines, a result cache,
    and per-config performance history used for HYBRID strategy selection.
    """

    # Cap on retained per-config performance samples; strategy selection
    # only ever inspects the most recent 10, so 100 is plenty.
    _MAX_STATS_PER_CONFIG = 100

    def __init__(
        self,
        db_manager: DatabaseManager,
        cache_manager: CacheManager,
        query_optimizer: QueryOptimizer
    ):
        self.db_manager = db_manager
        self.cache_manager = cache_manager
        self.query_optimizer = query_optimizer

        self.pre_computed = PreComputedAggregation(db_manager, cache_manager)
        self.real_time = RealTimeAggregator(db_manager, query_optimizer)
        self.parallel = ParallelAggregator(db_manager)

        # name -> AggregationConfig
        self.aggregation_configs = {}
        # name -> list of {'strategy', 'computation_time', ...} samples
        self.performance_stats = defaultdict(list)

    def register_aggregation(self, config: AggregationConfig):
        """Register (or overwrite) an aggregation config under its name."""
        self.aggregation_configs[config.name] = config

    async def compute_aggregation(
        self,
        config_name: str,
        filters: Dict[str, Any] = None,
        force_strategy: Optional[AggregationStrategy] = None
    ) -> AggregationResult:
        """Compute a registered aggregation, consulting the cache first.

        Args:
            config_name: name of a previously registered config.
            filters: extra filters merged on top of the config's own.
            force_strategy: bypass strategy selection (and the cache read).
        """
        if config_name not in self.aggregation_configs:
            return AggregationResult(
                config_name=config_name,
                result_data={},
                computation_time=0,
                strategy_used=AggregationStrategy.REAL_TIME,
                error="配置不存在"
            )

        config = self.aggregation_configs[config_name]
        start_time = time.time()

        # BUG FIX: the key previously used hash(str(filters)), which is not
        # stable across processes (hash randomization) nor across filter
        # insertion orders; a sorted JSON dump is deterministic.
        filters_key = json.dumps(filters, sort_keys=True, default=str) if filters else ""
        cache_key = f"agg:{config_name}:{filters_key}"
        cache = self.cache_manager.get_cache("aggregation")
        cached_result = await cache.get(cache_key)
        if cached_result and not force_strategy:
            try:
                cached_data = json.loads(cached_result)
                return AggregationResult(
                    config_name=config_name,
                    result_data=cached_data,
                    computation_time=time.time() - start_time,
                    strategy_used=AggregationStrategy.PRE_COMPUTED,
                    cache_hit=True
                )
            except json.JSONDecodeError:
                # Corrupt cache entry: fall through and recompute.
                pass

        strategy = force_strategy or self._select_optimal_strategy(config, filters)

        try:
            # Dispatch to the engine matching the chosen strategy.
            if strategy == AggregationStrategy.PRE_COMPUTED:
                result_data = await self._compute_pre_computed(config, filters)
            elif strategy == AggregationStrategy.PARALLEL:
                result_data = await self._compute_parallel(config, filters)
            else:  # REAL_TIME
                result_data = await self._compute_real_time(config, filters)

            computation_time = time.time() - start_time

            # Cache only successful computations.
            if result_data and not result_data.get('error'):
                await cache.set(
                    cache_key,
                    json.dumps(result_data, default=str),
                    ttl=config.cache_ttl
                )

            # Record a performance sample, bounding the history so it does
            # not grow without limit in a long-lived process.
            samples = self.performance_stats[config_name]
            samples.append({
                'strategy': strategy,
                'computation_time': computation_time,
                'rows_processed': result_data.get('rows_processed', 0),
                'timestamp': datetime.now()
            })
            del samples[:-self._MAX_STATS_PER_CONFIG]

            return AggregationResult(
                config_name=config_name,
                result_data=result_data,
                computation_time=computation_time,
                strategy_used=strategy,
                rows_processed=result_data.get('rows_processed', 0),
                error=result_data.get('error')
            )

        except Exception as e:
            return AggregationResult(
                config_name=config_name,
                result_data={},
                computation_time=time.time() - start_time,
                strategy_used=strategy,
                error=str(e)
            )

    def _select_optimal_strategy(
        self,
        config: AggregationConfig,
        filters: Dict[str, Any] = None
    ) -> AggregationStrategy:
        """Pick an execution strategy from config and performance history."""
        # Non-HYBRID configs always use their declared strategy.
        if config.strategy != AggregationStrategy.HYBRID:
            return config.strategy

        # HYBRID: compare recent average runtimes of the two candidates.
        if config.name in self.performance_stats:
            recent_stats = self.performance_stats[config.name][-10:]

            pre_times = [
                s['computation_time'] for s in recent_stats
                if s['strategy'] == AggregationStrategy.PRE_COMPUTED
            ]
            rt_times = [
                s['computation_time'] for s in recent_stats
                if s['strategy'] == AggregationStrategy.REAL_TIME
            ]

            # BUG FIX: with zero PRE_COMPUTED samples the old code computed
            # an average of 0 and always "preferred" pre-computation; both
            # strategies must have samples before they can be compared.
            if pre_times and rt_times:
                pre_computed_avg = sum(pre_times) / len(pre_times)
                real_time_avg = sum(rt_times) / len(rt_times)
                # Prefer pre-computation when it is at least 50% faster.
                if pre_computed_avg < real_time_avg * 0.5:
                    return AggregationStrategy.PRE_COMPUTED

        # Default: compute in real time.
        return AggregationStrategy.REAL_TIME

    @staticmethod
    def _with_filters(config: AggregationConfig, filters: Dict[str, Any]) -> AggregationConfig:
        """Return a copy of *config* with *filters* merged in.

        BUG FIX: ad-hoc filters were previously update()d into the
        registered config's filter_conditions, permanently leaking them
        into every subsequent computation of the same config.
        """
        from dataclasses import replace
        return replace(
            config,
            filter_conditions={**config.filter_conditions, **filters}
        )

    async def _compute_pre_computed(
        self,
        config: AggregationConfig,
        filters: Dict[str, Any] = None
    ) -> Dict[str, Any]:
        """Serve the aggregation from its materialized view."""
        async with self.db_manager.get_session() as session:
            result = await self.pre_computed.query_materialized_view(
                session, config.name, filters
            )
            return {
                'data': result,
                'rows_processed': len(result)
            }

    async def _compute_real_time(
        self,
        config: AggregationConfig,
        filters: Dict[str, Any] = None
    ) -> Dict[str, Any]:
        """Aggregate directly against the source table."""
        if filters:
            config = self._with_filters(config, filters)

        async with self.db_manager.get_session() as session:
            return await self.real_time.compute_aggregation(session, config)

    async def _compute_parallel(
        self,
        config: AggregationConfig,
        filters: Dict[str, Any] = None
    ) -> Dict[str, Any]:
        """Aggregate with the partitioned parallel engine."""
        if filters:
            config = self._with_filters(config, filters)

        return await self.parallel.compute_parallel_aggregation(config)

    async def setup_pre_computed_aggregations(self):
        """Create materialized views for all PRE_COMPUTED / HYBRID configs."""
        async with self.db_manager.get_session() as session:
            for config in self.aggregation_configs.values():
                if config.strategy in [AggregationStrategy.PRE_COMPUTED, AggregationStrategy.HYBRID]:
                    await self.pre_computed.create_materialized_view(session, config)

    async def refresh_pre_computed_aggregations(self):
        """Refresh every known materialized view (unknown names are no-ops)."""
        async with self.db_manager.get_session() as session:
            for config_name in self.aggregation_configs:
                await self.pre_computed.refresh_materialized_view(session, config_name)

    def get_performance_summary(self) -> Dict[str, Any]:
        """Summarize recorded runtimes per config, keyed by strategy value."""
        summary = {}

        for config_name, stats in self.performance_stats.items():
            if not stats:
                continue

            # Bucket computation times by strategy.
            strategy_stats = defaultdict(list)
            for stat in stats:
                strategy_stats[stat['strategy']].append(stat['computation_time'])

            config_summary = {}
            for strategy, times in strategy_stats.items():
                config_summary[strategy.value] = {
                    'avg_time': sum(times) / len(times),
                    'min_time': min(times),
                    'max_time': max(times),
                    'count': len(times)
                }

            summary[config_name] = config_summary

        return summary


# 全局聚合优化器实例
# Process-wide singleton, created lazily on first access.
_aggregation_optimizer: Optional[AggregationOptimizer] = None

def get_aggregation_optimizer() -> AggregationOptimizer:
    """Return the shared AggregationOptimizer, constructing it on first use."""
    global _aggregation_optimizer
    if _aggregation_optimizer is not None:
        return _aggregation_optimizer

    # Deferred imports of the concrete manager instances.
    from .database import db_manager
    from .cache import cache_manager
    from .query_optimizer import query_optimizer

    _aggregation_optimizer = AggregationOptimizer(
        db_manager, cache_manager, query_optimizer
    )
    return _aggregation_optimizer


# 预设聚合配置
def get_preset_aggregation_configs() -> List[AggregationConfig]:
    """Return the built-in aggregation configurations shipped with this module."""
    # Daily stock price aggregation.
    stock_price_daily = AggregationConfig(
        name="stock_price_daily",
        aggregation_type=AggregationType.AVG,
        strategy=AggregationStrategy.HYBRID,
        source_table="stock_daily_data",
        source_columns=["close_price", "volume", "turnover"],
        group_by_columns=["stock_code"],
        time_column="trade_date",
        time_granularity=TimeGranularity.DAY,
        cache_ttl=1800,  # 30 minutes
        refresh_interval=3600  # 1 hour
    )

    # Quarterly financial statement aggregation.
    financial_quarterly = AggregationConfig(
        name="financial_quarterly",
        aggregation_type=AggregationType.SUM,
        strategy=AggregationStrategy.PRE_COMPUTED,
        source_table="financial_statements",
        source_columns=["revenue", "net_profit", "total_assets"],
        group_by_columns=["stock_code", "report_type"],
        time_column="report_date",
        time_granularity=TimeGranularity.QUARTER,
        cache_ttl=7200,  # 2 hours
        refresh_interval=86400  # 24 hours
    )

    # Market/industry composition counts.
    market_analysis = AggregationConfig(
        name="market_analysis",
        aggregation_type=AggregationType.COUNT,
        strategy=AggregationStrategy.PARALLEL,
        source_table="stock_daily_data",
        source_columns=["stock_code"],
        group_by_columns=["market", "industry"],
        filter_conditions={"is_active": True},
        cache_ttl=3600,  # 1 hour
        parallel_workers=6
    )

    # Technical indicator averages per stock.
    technical_indicators = AggregationConfig(
        name="technical_indicators",
        aggregation_type=AggregationType.AVG,
        strategy=AggregationStrategy.REAL_TIME,
        source_table="technical_indicators",
        source_columns=["ma5", "ma10", "ma20", "rsi", "macd"],
        group_by_columns=["stock_code"],
        time_column="trade_date",
        time_granularity=TimeGranularity.DAY,
        cache_ttl=900,  # 15 minutes
        enable_incremental=True
    )

    # Weekly traded volume/turnover totals.
    volume_statistics = AggregationConfig(
        name="volume_statistics",
        aggregation_type=AggregationType.SUM,
        strategy=AggregationStrategy.HYBRID,
        source_table="stock_daily_data",
        source_columns=["volume", "turnover"],
        group_by_columns=["market", "stock_code"],
        time_column="trade_date",
        time_granularity=TimeGranularity.WEEK,
        cache_ttl=1800,
        refresh_interval=7200
    )

    return [
        stock_price_daily,
        financial_quarterly,
        market_analysis,
        technical_indicators,
        volume_statistics,
    ]


async def initialize_aggregation_optimizer():
    """Build the global optimizer: register presets and create materialized views."""
    optimizer = get_aggregation_optimizer()

    # Register every preset configuration.
    for preset in get_preset_aggregation_configs():
        optimizer.register_aggregation(preset)

    # Create materialized views for PRE_COMPUTED / HYBRID presets.
    await optimizer.setup_pre_computed_aggregations()

    return optimizer


# 聚合查询辅助函数
async def quick_aggregate(
    table: str,
    columns: List[str],
    agg_type: AggregationType = AggregationType.SUM,
    group_by: List[str] = None,
    filters: Dict[str, Any] = None,
    strategy: AggregationStrategy = AggregationStrategy.REAL_TIME
) -> AggregationResult:
    """Run a one-off aggregation over *table* without a pre-registered config.

    Args:
        table: source table name.
        columns: columns to aggregate.
        agg_type: aggregate function to apply (default SUM).
        group_by: optional GROUP BY columns.
        filters: optional equality/IN filters.
        strategy: execution strategy (default REAL_TIME).
    """
    import uuid

    optimizer = get_aggregation_optimizer()

    # BUG FIX: the name previously ended in int(time.time()), so two quick
    # aggregations on the same table within one second collided and
    # silently overwrote each other's registered config.
    config = AggregationConfig(
        name=f"quick_{table}_{uuid.uuid4().hex}",
        aggregation_type=agg_type,
        strategy=strategy,
        source_table=table,
        source_columns=columns,
        group_by_columns=group_by or [],
        filter_conditions=filters or {},
        cache_ttl=300  # cache ad-hoc results for 5 minutes
    )

    # NOTE(review): configs registered here are never unregistered, so heavy
    # use of quick_aggregate grows optimizer.aggregation_configs unboundedly.
    optimizer.register_aggregation(config)
    result = await optimizer.compute_aggregation(config.name, filters)

    return result


# 性能监控装饰器
def monitor_aggregation_performance(func):
    """Decorator that logs the wall-clock runtime of an async aggregation call.

    Prints the elapsed time on success; on failure prints the error and
    elapsed time, then re-raises the original exception.
    """
    from functools import wraps

    # BUG FIX: without functools.wraps the wrapper hid the wrapped
    # function's __name__/__doc__ — metadata this decorator itself prints.
    @wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()
        try:
            result = await func(*args, **kwargs)
            execution_time = time.time() - start_time

            print(f"聚合函数 {func.__name__} 执行时间: {execution_time:.3f}秒")

            return result
        except Exception as e:
            execution_time = time.time() - start_time
            print(f"聚合函数 {func.__name__} 执行失败: {e}, 耗时: {execution_time:.3f}秒")
            raise

    return wrapper


# 批量聚合处理
async def batch_aggregate(
    configs: List[AggregationConfig],
    filters: Dict[str, Any] = None,
    max_concurrent: int = 5
) -> List[AggregationResult]:
    """Run several aggregations concurrently with a cap on parallelism.

    Exceptions raised by individual aggregations are converted into
    AggregationResult objects carrying the error message.
    """
    optimizer = get_aggregation_optimizer()

    # Every config must be registered before it can be computed.
    for cfg in configs:
        optimizer.register_aggregation(cfg)

    # Gate limiting how many aggregations run at once.
    gate = asyncio.Semaphore(max_concurrent)

    async def _run(name: str):
        async with gate:
            return await optimizer.compute_aggregation(name, filters)

    outcomes = await asyncio.gather(
        *(_run(cfg.name) for cfg in configs), return_exceptions=True
    )

    # Map raised exceptions onto error-bearing results.
    final: List[AggregationResult] = []
    for cfg, outcome in zip(configs, outcomes):
        if isinstance(outcome, Exception):
            final.append(AggregationResult(
                config_name=cfg.name,
                result_data={},
                computation_time=0,
                strategy_used=AggregationStrategy.REAL_TIME,
                error=str(outcome)
            ))
        else:
            final.append(outcome)

    return final


# Global aggregation optimizer instance, created eagerly at import time.
# NOTE(review): this triggers the project imports inside
# get_aggregation_optimizer() as a side effect of importing this module.
aggregation_optimizer = get_aggregation_optimizer()