"""
查询优化器
提供智能查询优化、执行计划分析和性能监控
"""

import time
import asyncio
import hashlib
import json
import re
from collections import defaultdict, deque
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from typing import List, Dict, Any, Optional, Tuple, Union

from sqlalchemy import text, select, func, and_, or_
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import Select
from sqlalchemy.engine import Result

from .database import DatabaseManager
from .cache import CacheManager, CacheLevel
from ..core.config import get_settings


class QueryType(Enum):
    """Category of SQL statement tracked by the optimizer."""

    SELECT = "SELECT"            # plain read query
    INSERT = "INSERT"            # single-row insert
    UPDATE = "UPDATE"            # row update
    DELETE = "DELETE"            # row delete
    BULK_INSERT = "BULK_INSERT"  # batched multi-row insert
    AGGREGATE = "AGGREGATE"      # aggregation / reporting query


class OptimizationStrategy(Enum):
    """Optimization technique that may be applied to a query."""

    INDEX_HINT = "INDEX_HINT"                  # suggest/force a specific index
    PARTITION_PRUNING = "PARTITION_PRUNING"    # restrict scan to relevant partitions
    QUERY_REWRITE = "QUERY_REWRITE"            # rewrite the statement itself
    BATCH_OPTIMIZATION = "BATCH_OPTIMIZATION"  # batch/aggregate-oriented rewrite
    CACHE_OPTIMIZATION = "CACHE_OPTIMIZATION"  # serve/populate result cache
    PARALLEL_EXECUTION = "PARALLEL_EXECUTION"  # split work across workers


@dataclass
class QueryMetrics:
    """Metrics recorded for a single query execution."""
    query_id: str  # short unique id for this execution
    query_type: QueryType
    sql: str  # SQL text as executed (post-optimization)
    execution_time: float  # wall-clock seconds
    rows_affected: int
    cache_hit: bool = False  # True when a cached result was found
    optimization_applied: List[OptimizationStrategy] = field(default_factory=list)
    timestamp: datetime = field(default_factory=datetime.now)  # recorded at metric creation
    session_id: Optional[str] = None
    error: Optional[str] = None  # set when execution raised; None on success


@dataclass
class QueryPlan:
    """Parsed summary of a database execution plan."""
    query_hash: str  # hash identifying the (normalized) query
    plan_text: str  # raw plan text as returned by the database
    cost_estimate: float  # planner cost estimate
    rows_estimate: int  # planner row-count estimate
    execution_time_estimate: float
    indexes_used: List[str] = field(default_factory=list)  # index names hit by the plan
    table_scans: List[str] = field(default_factory=list)  # tables read via sequential scan
    optimization_suggestions: List[str] = field(default_factory=list)


class QueryPatternAnalyzer:
    """Analyzes SQL query patterns for frequency tracking and tuning hints.

    Fixes over the previous implementation:
    - ``max_patterns`` is now actually enforced: once the number of distinct
      patterns exceeds the cap, the oldest patterns are evicted, preventing
      unbounded memory growth.
    - SQL keyword counting uses word boundaries, so e.g. the ``OR`` inside
      ``ORDER BY`` is no longer miscounted as a WHERE condition and ``MAX``
      inside an identifier no longer flags an aggregate.
    """

    def __init__(self, max_patterns: int = 1000):
        # Maximum number of distinct normalized patterns to retain.
        self.max_patterns = max_patterns
        # pattern_hash -> list of recorded occurrences (insertion-ordered).
        self.query_patterns = defaultdict(list)
        # Patterns known to be frequent (populated externally).
        self.frequent_patterns = {}
        # Ring buffer of the most recent slow-query metrics.
        self.slow_queries = deque(maxlen=100)

    def analyze_query(self, sql: str) -> Dict[str, Any]:
        """Record one query occurrence and return its pattern summary.

        Returns a dict with the pattern hash, the extracted structural
        features, and whether the pattern is already known to be frequent.
        """
        # Normalize so queries differing only in literal values collapse
        # onto the same pattern hash.
        normalized_sql = self._normalize_sql(sql)
        pattern_hash = hashlib.md5(normalized_sql.encode()).hexdigest()

        features = self._extract_features(sql)

        self.query_patterns[pattern_hash].append({
            'sql': sql,
            'normalized': normalized_sql,
            'features': features,
            'timestamp': datetime.now()
        })

        # Enforce the pattern cap (dicts are insertion-ordered, so the
        # first key is always the oldest recorded pattern).
        while len(self.query_patterns) > self.max_patterns:
            oldest = next(iter(self.query_patterns))
            del self.query_patterns[oldest]

        return {
            'pattern_hash': pattern_hash,
            'features': features,
            'is_frequent': pattern_hash in self.frequent_patterns
        }

    def _normalize_sql(self, sql: str) -> str:
        """Normalize a SQL statement: collapse whitespace, mask literal
        string/number parameters, and upper-case the result."""
        sql = ' '.join(sql.split())
        sql = re.sub(r"'[^']*'", "'?'", sql)  # mask string literals
        sql = re.sub(r'\b\d+\b', '?', sql)    # mask numeric literals
        return sql.upper()

    def _extract_features(self, sql: str) -> Dict[str, Any]:
        """Extract structural features used by the optimization heuristics.

        Keywords are matched on word boundaries so substrings inside longer
        words ('ORDER' containing 'OR', identifiers containing 'MAX') do
        not produce false positives.
        """
        upper = sql.upper()

        def kw_count(word: str) -> int:
            # Whole-word occurrences of a SQL keyword.
            return len(re.findall(r'\b' + word + r'\b', upper))

        return {
            'has_join': kw_count('JOIN') > 0,
            # Heuristic: a '(' together with SELECT suggests a subquery
            # (may also fire on function calls inside a SELECT).
            'has_subquery': '(' in sql and 'SELECT' in upper,
            'has_aggregate': any(kw_count(f) > 0 for f in ('COUNT', 'SUM', 'AVG', 'MAX', 'MIN')),
            'has_order_by': 'ORDER BY' in upper,
            'has_group_by': 'GROUP BY' in upper,
            'has_limit': kw_count('LIMIT') > 0,
            'table_count': kw_count('FROM') + kw_count('JOIN'),
            'where_conditions': kw_count('WHERE') + kw_count('AND') + kw_count('OR')
        }

    def get_optimization_suggestions(self, pattern_hash: str) -> List[str]:
        """Return tuning suggestions for the most recent occurrence of a
        recorded pattern (empty list for unknown patterns)."""
        if pattern_hash not in self.query_patterns:
            return []

        pattern_data = self.query_patterns[pattern_hash][-1]
        features = pattern_data['features']
        suggestions = []

        if features['has_join'] and features['table_count'] > 3:
            suggestions.append("考虑添加适当的索引以优化多表连接")

        if features['has_aggregate'] and not features['has_group_by']:
            suggestions.append("聚合查询可能受益于列存储索引")

        if features['where_conditions'] > 5:
            suggestions.append("复杂WHERE条件可能需要复合索引")

        if not features['has_limit'] and features['table_count'] > 1:
            suggestions.append("考虑添加LIMIT子句限制结果集大小")

        return suggestions


class IndexOptimizer:
    """Index-usage analysis and index suggestions (PostgreSQL).

    Fixes over the previous implementation:
    - ``re`` is used from the module level instead of being re-imported
      once per loop iteration.
    - Composite-index detection inspects the leading columns of each
      existing index, instead of a substring test against an unordered
      set of column names (which matched essentially at random).
    - ``_parse_execution_plan`` also accepts the JSON plan as a raw
      string, since drivers may return ``EXPLAIN (FORMAT JSON)`` output
      either decoded or as text.
    """

    def __init__(self, db_manager: DatabaseManager):
        self.db_manager = db_manager
        # index name -> observed usage count (reserved for future use).
        self.index_usage_stats = defaultdict(int)
        self.missing_indexes = []

    async def analyze_query_indexes(self, session: AsyncSession, sql: str) -> Dict[str, Any]:
        """Run EXPLAIN ANALYZE for ``sql`` and summarize index usage.

        NOTE(review): EXPLAIN ANALYZE actually executes the statement —
        only call this on read-only queries.
        """
        try:
            explain_sql = f"EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON) {sql}"
            result = await session.execute(text(explain_sql))
            plan_data = result.scalar()
            return self._parse_execution_plan(plan_data)
        except Exception as e:
            # Monitoring callers should not fail on an EXPLAIN error, so
            # return a diagnostic payload instead of raising.
            return {'error': str(e), 'indexes_used': [], 'table_scans': []}

    def _parse_execution_plan(self, plan_data: Union[str, List[Dict]]) -> Dict[str, Any]:
        """Walk a PostgreSQL JSON execution plan, collecting index scans,
        sequential scans, cost and actual timing."""
        # Some drivers hand back the JSON document as a raw string.
        if isinstance(plan_data, str):
            try:
                plan_data = json.loads(plan_data)
            except ValueError:
                return {'indexes_used': [], 'table_scans': [], 'cost': 0}
        if not plan_data or not isinstance(plan_data, list):
            return {'indexes_used': [], 'table_scans': [], 'cost': 0}

        plan = plan_data[0].get('Plan', {})

        indexes_used = []
        table_scans = []
        total_cost = plan.get('Total Cost', 0)

        def extract_scan_info(node):
            node_type = node.get('Node Type', '')

            if 'Index' in node_type:
                index_name = node.get('Index Name', '')
                if index_name:
                    indexes_used.append(index_name)
            elif 'Seq Scan' in node_type:
                relation_name = node.get('Relation Name', '')
                if relation_name:
                    table_scans.append(relation_name)

            # Recurse into child plan nodes.
            for child in node.get('Plans', []):
                extract_scan_info(child)

        extract_scan_info(plan)

        return {
            'indexes_used': indexes_used,
            'table_scans': table_scans,
            'total_cost': total_cost,
            'execution_time': plan.get('Actual Total Time', 0)
        }

    async def suggest_indexes(self, session: AsyncSession, table_name: str) -> List[str]:
        """Suggest CREATE INDEX statements for ``table_name``.

        Recommends single-column indexes on commonly queried columns that
        are not yet indexed, plus a (symbol, trade_date) composite index
        when both columns exist and no existing index leads with them.
        Returns SQL suggestion strings (not executed here).
        """
        suggestions = []

        try:
            # Column inventory of the table.
            columns_sql = """
                SELECT column_name, data_type, is_nullable
                FROM information_schema.columns
                WHERE table_name = :table_name
                ORDER BY ordinal_position
            """
            result = await session.execute(text(columns_sql), {'table_name': table_name})
            columns = result.fetchall()

            # Existing indexes and their definitions.
            indexes_sql = """
                SELECT indexname, indexdef
                FROM pg_indexes
                WHERE tablename = :table_name
            """
            result = await session.execute(text(indexes_sql), {'table_name': table_name})
            existing_indexes = result.fetchall()

            # Ordered column list per existing index, parsed from its DDL.
            index_column_lists = []
            for idx in existing_indexes:
                columns_match = re.search(r'\((.*?)\)', idx.indexdef)
                if columns_match:
                    index_column_lists.append(
                        [col.strip() for col in columns_match.group(1).split(',')]
                    )
            existing_index_columns = {c for cols in index_column_lists for c in cols}

            column_names = [col.column_name for col in columns]

            # Single-column indexes for commonly queried fields.
            common_query_columns = ['symbol', 'stock_code', 'trade_date', 'created_at', 'updated_at']
            for col_name in column_names:
                if col_name in common_query_columns and col_name not in existing_index_columns:
                    suggestions.append(f"CREATE INDEX idx_{table_name}_{col_name} ON {table_name} ({col_name});")

            # Composite index: only suggest if no existing index already
            # leads with (symbol, trade_date).
            if 'symbol' in column_names and 'trade_date' in column_names:
                has_composite = any(
                    cols[:2] == ['symbol', 'trade_date'] for cols in index_column_lists
                )
                if not has_composite:
                    suggestions.append(f"CREATE INDEX idx_{table_name}_symbol_date ON {table_name} (symbol, trade_date);")

        except Exception as e:
            suggestions.append(f"-- 索引分析错误: {str(e)}")

        return suggestions


class QueryOptimizer:
    """Main query optimizer.

    Coordinates pattern analysis, strategy selection, an (incomplete)
    result-cache path, execution, and metrics collection.
    """
    
    def __init__(
        self,
        db_manager: DatabaseManager,
        cache_manager: CacheManager,
        enable_monitoring: bool = True
    ):
        self.db_manager = db_manager
        self.cache_manager = cache_manager
        # When True, per-execution QueryMetrics are retained and slow
        # queries trigger index analysis.
        self.enable_monitoring = enable_monitoring
        
        self.pattern_analyzer = QueryPatternAnalyzer()
        self.index_optimizer = IndexOptimizer(db_manager)
        
        # Ring buffer of recent execution metrics (bounded to 10k entries).
        self.query_metrics = deque(maxlen=10000)
        self.optimization_rules = []
        self.performance_thresholds = {
            'slow_query_time': 1.0,  # seconds; beyond this a query is "slow"
            'cache_hit_rate_min': 0.8,  # 80% target hit rate
            'max_table_scans': 2
        }
    
    async def optimize_query(
        self,
        session: AsyncSession,
        query: Union[str, Select],
        query_type: QueryType = QueryType.SELECT
    ) -> Tuple[Union[str, Select], List[OptimizationStrategy]]:
        """Analyze ``query`` and return the (possibly rewritten) query
        plus the list of optimization strategies applied."""
        if isinstance(query, Select):
            # Render a SQLAlchemy Select to SQL text for pattern analysis.
            sql = str(query.compile(compile_kwargs={"literal_binds": True}))
        else:
            sql = query
        
        applied_strategies = []
        optimized_query = query
        
        # Record the query pattern and derive structural features.
        pattern_analysis = self.pattern_analyzer.analyze_query(sql)
        
        # Apply feature-driven optimization strategies.
        if pattern_analysis['features']['has_join']:
            optimized_query, strategy = await self._optimize_joins(optimized_query)
            if strategy:
                applied_strategies.append(strategy)
        
        if pattern_analysis['features']['has_aggregate']:
            optimized_query, strategy = await self._optimize_aggregates(optimized_query)
            if strategy:
                applied_strategies.append(strategy)
        
        # Flag cacheable queries for the executor.
        if await self._should_cache_query(sql):
            applied_strategies.append(OptimizationStrategy.CACHE_OPTIMIZATION)
        
        return optimized_query, applied_strategies
    
    async def _optimize_joins(self, query: Union[str, Select]) -> Tuple[Union[str, Select], Optional[OptimizationStrategy]]:
        """Optimize join queries.

        Placeholder: returns the query unchanged while still reporting
        QUERY_REWRITE. Join reordering / index hints would go here.
        """
        return query, OptimizationStrategy.QUERY_REWRITE
    
    async def _optimize_aggregates(self, query: Union[str, Select]) -> Tuple[Union[str, Select], Optional[OptimizationStrategy]]:
        """Optimize aggregate queries.

        Placeholder: returns the query unchanged while still reporting
        BATCH_OPTIMIZATION. Materialized views / precomputation would go
        here.
        """
        return query, OptimizationStrategy.BATCH_OPTIMIZATION
    
    async def _should_cache_query(self, sql: str) -> bool:
        """Heuristic: only SELECTs that do not reference the current
        timestamp are considered cacheable."""
        if 'SELECT' not in sql.upper():
            return False
        
        # Time-dependent queries would serve stale results from a cache.
        if 'NOW()' in sql.upper() or 'CURRENT_TIMESTAMP' in sql.upper():
            return False
        
        return True
    
    async def execute_optimized_query(
        self,
        session: AsyncSession,
        query: Union[str, Select],
        query_type: QueryType = QueryType.SELECT,
        cache_ttl: Optional[int] = None
    ) -> Tuple[Result, QueryMetrics]:
        """Optimize, execute, and measure a query.

        Returns the SQLAlchemy Result and the QueryMetrics recorded for
        this execution; re-raises any execution error after recording
        failure metrics.

        NOTE(review): the result-cache path is a stub — a cache probe is
        performed, but a cached value is never returned and results are
        never serialized into the cache; the query always executes.
        """
        start_time = time.time()
        query_id = hashlib.md5(f"{query}{time.time()}".encode()).hexdigest()[:16]
        
        # Run the optimization pipeline first.
        optimized_query, strategies = await self.optimize_query(session, query, query_type)
        
        # Cache probe (see the stub note in the docstring).
        cache_hit = False
        if OptimizationStrategy.CACHE_OPTIMIZATION in strategies and cache_ttl:
            cache_key = f"query:{hashlib.md5(str(optimized_query).encode()).hexdigest()}"
            cached_result = await self.cache_manager.get(cache_key)
            if cached_result:
                cache_hit = True
                # Deserialization of the cached result would happen here;
                # the simplified implementation falls through to execution.
        
        # Execute the (possibly rewritten) query.
        try:
            if isinstance(optimized_query, str):
                result = await session.execute(text(optimized_query))
            else:
                result = await session.execute(optimized_query)
            
            execution_time = time.time() - start_time
            rows_affected = result.rowcount if hasattr(result, 'rowcount') else 0
            
            # Cache the result (stub: serialization not implemented).
            if not cache_hit and OptimizationStrategy.CACHE_OPTIMIZATION in strategies and cache_ttl:
                cache_key = f"query:{hashlib.md5(str(optimized_query).encode()).hexdigest()}"
                # Serialization of the result would happen here, e.g.:
                # await self.cache_manager.set(cache_key, serialized_result, ttl=cache_ttl)
            
            # Record per-execution metrics.
            metrics = QueryMetrics(
                query_id=query_id,
                query_type=query_type,
                sql=str(optimized_query),
                execution_time=execution_time,
                rows_affected=rows_affected,
                cache_hit=cache_hit,
                optimization_applied=strategies
            )
            
            if self.enable_monitoring:
                self.query_metrics.append(metrics)
                
                # Trigger slow-query handling past the threshold.
                if execution_time > self.performance_thresholds['slow_query_time']:
                    await self._handle_slow_query(metrics)
            
            return result, metrics
            
        except Exception as e:
            # Record failure metrics, then propagate the original error.
            execution_time = time.time() - start_time
            metrics = QueryMetrics(
                query_id=query_id,
                query_type=query_type,
                sql=str(optimized_query),
                execution_time=execution_time,
                rows_affected=0,
                cache_hit=cache_hit,
                optimization_applied=strategies,
                error=str(e)
            )
            
            if self.enable_monitoring:
                self.query_metrics.append(metrics)
            
            raise
    
    async def _handle_slow_query(self, metrics: QueryMetrics):
        """Record a slow query and analyze its index usage."""
        # Keep it in the analyzer's slow-query ring buffer.
        self.pattern_analyzer.slow_queries.append(metrics)
        
        # Analyze index usage with a fresh session, when available.
        if hasattr(self.db_manager, 'get_session'):
            async with self.db_manager.get_session() as session:
                index_analysis = await self.index_optimizer.analyze_query_indexes(
                    session, metrics.sql
                )
                
                # Sequential scans found: gather index suggestions per table.
                if index_analysis.get('table_scans'):
                    for table in index_analysis['table_scans']:
                        suggestions = await self.index_optimizer.suggest_indexes(session, table)
                        # NOTE(review): suggestions are currently discarded;
                        # this is the hook for logging or alerting.
    
    def get_performance_stats(self) -> Dict[str, Any]:
        """Aggregate the retained metrics into a summary dict.

        Returns an empty dict when no metrics have been recorded.
        """
        if not self.query_metrics:
            return {}
        
        total_queries = len(self.query_metrics)
        cache_hits = sum(1 for m in self.query_metrics if m.cache_hit)
        slow_queries = sum(1 for m in self.query_metrics 
                          if m.execution_time > self.performance_thresholds['slow_query_time'])
        
        avg_execution_time = sum(m.execution_time for m in self.query_metrics) / total_queries
        
        # Per-query-type counts.
        query_types = defaultdict(int)
        for metrics in self.query_metrics:
            query_types[metrics.query_type.value] += 1
        
        return {
            'total_queries': total_queries,
            'cache_hit_rate': (cache_hits / total_queries * 100) if total_queries > 0 else 0,
            'slow_query_count': slow_queries,
            'slow_query_rate': (slow_queries / total_queries * 100) if total_queries > 0 else 0,
            'avg_execution_time': round(avg_execution_time, 4),
            'query_types': dict(query_types),
            'optimization_strategies_used': self._get_strategy_stats()
        }
    
    def _get_strategy_stats(self) -> Dict[str, int]:
        """Count how often each optimization strategy was applied across
        the retained metrics."""
        strategy_counts = defaultdict(int)
        for metrics in self.query_metrics:
            for strategy in metrics.optimization_applied:
                strategy_counts[strategy.value] += 1
        return dict(strategy_counts)


# Global query-optimizer singleton (created lazily on first use).
_query_optimizer: Optional[QueryOptimizer] = None

def get_query_optimizer() -> QueryOptimizer:
    """Return the process-wide QueryOptimizer, creating it on first use."""
    global _query_optimizer
    if _query_optimizer is None:
        # Imported lazily to avoid a circular import at module load.
        from .database import db_manager
        from .cache import cache_manager
        
        _query_optimizer = QueryOptimizer(db_manager, cache_manager)
    
    return _query_optimizer


def __getattr__(name: str):
    """Module-level lazy attribute hook (PEP 562).

    Keeps the historical ``query_optimizer`` module attribute available
    for backward compatibility, but without eagerly constructing the
    optimizer (and pulling in the database/cache managers) as an
    import-time side effect, as the previous
    ``query_optimizer = get_query_optimizer()`` did.
    """
    if name == "query_optimizer":
        return get_query_optimizer()
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")