"""
数据库查询优化服务
提供查询优化、索引建议和性能监控功能
"""
import functools
import json
import logging
import time
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Union

from sqlalchemy import text, func, and_, or_, not_
from sqlalchemy.orm import Session, Query
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.sql import select

from core.database import get_db
from core.logging_config import get_logger
from services.cache_service import cache_service, query_cache, cache_result

# 日志记录器
logger = get_logger("query_optimization_service")


class QueryOptimizationService:
    """Collects per-query runtime statistics and derives index suggestions.

    Statistics live in memory, keyed by ``hash(sql + str(params))``. A query
    whose single execution exceeds ``slow_query_threshold`` seconds is logged
    as a slow query.
    """

    def __init__(self):
        # Executions slower than this many seconds are reported as slow.
        self.slow_query_threshold = 2.0
        # hash(sql + params) -> accumulated statistics dict
        self.query_stats = {}
        # Reserved for cached index recommendations.
        self.index_recommendations = {}

    def log_query_performance(self, query: str, params: tuple, execution_time: float):
        """Accumulate timing statistics for one execution of *query*."""
        key = hash(query + str(params))
        entry = self.query_stats.setdefault(key, {
            'query': query,
            'params': params,
            'execution_count': 0,
            'total_time': 0,
            'avg_time': 0,
            'max_time': 0,
            'min_time': float('inf'),
            'last_executed': None
        })

        entry['execution_count'] += 1
        entry['total_time'] += execution_time
        entry['avg_time'] = entry['total_time'] / entry['execution_count']
        entry['max_time'] = max(entry['max_time'], execution_time)
        entry['min_time'] = min(entry['min_time'], execution_time)
        entry['last_executed'] = datetime.now()

        # Flag individual slow executions as they happen.
        if execution_time > self.slow_query_threshold:
            logger.warning(f"慢查询检测: {query[:100]}... 执行时间: {execution_time:.3f}秒")

    def get_query_statistics(self) -> Dict[str, Any]:
        """Summarize collected statistics, including the ten slowest queries
        ranked by average execution time."""
        total = len(self.query_stats)
        slow = sum(
            entry['avg_time'] > self.slow_query_threshold
            for entry in self.query_stats.values()
        )
        ranked = sorted(
            self.query_stats.values(),
            key=lambda entry: entry['avg_time'],
            reverse=True
        )

        return {
            'total_queries': total,
            'slow_queries': slow,
            'slow_query_ratio': (slow / total * 100) if total > 0 else 0,
            'slow_query_threshold': self.slow_query_threshold,
            'top_slow_queries': ranked[:10]
        }

    def optimize_query(self, query: str) -> str:
        """Return an optimized form of *query*.

        Currently a placeholder: the hooks below mark where index hints, JOIN
        tuning and WHERE tuning would be applied; the query is returned
        unchanged.
        """
        optimized = query
        upper_sql = query.upper()

        # Hook: index hints for plain SELECTs without an explicit FORCE INDEX.
        if 'SELECT' in upper_sql and 'FORCE INDEX' not in upper_sql:
            pass

        # Hook: make sure JOIN columns are indexed.
        if 'JOIN' in upper_sql:
            pass

        # Hook: make sure WHERE columns are indexed.
        if 'WHERE' in upper_sql:
            pass

        return optimized

    def suggest_indexes(self, table_name: str, db: Session) -> List[Dict[str, Any]]:
        """Suggest indexes for *table_name* from observed query patterns.

        Counts how often each column appears in WHERE and JOIN clauses of
        recorded queries that mention the table; a column used more than 10
        times yields one suggestion. Returns an empty list when nothing is
        recorded or on failure.
        """
        suggestions = []

        try:
            relevant = [
                entry for entry in self.query_stats.values()
                if table_name in entry['query']
            ]
            if not relevant:
                return suggestions

            # column name -> weighted usage count (weighted by execution count)
            usage = {}
            for entry in relevant:
                upper_sql = entry['query'].upper()
                weight = entry['execution_count']

                # Columns referenced in the WHERE clause.
                if 'WHERE' in upper_sql:
                    clause = upper_sql.split('WHERE')[1].split('GROUP BY')[0].split('ORDER BY')[0]
                    for name in self._extract_columns_from_where(clause):
                        usage[name] = usage.get(name, 0) + weight

                # Columns referenced in JOIN ... ON conditions.
                if 'JOIN' in upper_sql:
                    for segment in upper_sql.split('JOIN')[1:]:
                        if 'ON' in segment:
                            on_clause = segment.split('ON')[1].split('AND')[0].split('WHERE')[0]
                            for name in self._extract_columns_from_join(on_clause):
                                usage[name] = usage.get(name, 0) + weight

            # Emit one suggestion per frequently used column.
            for column, usage_count in usage.items():
                if usage_count > 10:
                    suggestions.append({
                        'column': column,
                        'usage_count': usage_count,
                        'index_type': 'BTREE',
                        'reason': f'该列在查询中被使用了{usage_count}次',
                        'priority': 'high' if usage_count > 50 else 'medium'
                    })

        except Exception as e:
            logger.error(f"生成索引建议失败: {e}")

        return suggestions

    def _extract_columns_from_where(self, where_clause: str) -> List[str]:
        """Extract column names from a WHERE clause.

        Naive whitespace tokenizer: the token preceding a comparison operator
        is treated as a column; a ``table.column`` reference is reduced to the
        column part. Duplicates are kept out, order of first use preserved.
        """
        operators = ('=', '>', '<', '>=', '<=', '!=', '<>', 'LIKE', 'IN')
        found = []
        tokens = where_clause.split()
        for idx in range(1, len(tokens)):
            if tokens[idx] not in operators:
                continue
            candidate = tokens[idx - 1].strip('()')
            if '.' in candidate:
                candidate = candidate.split('.')[1]
            if candidate not in found:
                found.append(candidate)

        return found

    def _extract_columns_from_join(self, join_clause: str) -> List[str]:
        """Extract column names from a JOIN ... ON equality condition."""
        found = []

        if '=' not in join_clause:
            return found

        # Each side of the equality is a (possibly qualified) column reference.
        for side in join_clause.split('='):
            name = side.strip().strip('()')
            if '.' in name:
                name = name.split('.')[1]
            if name not in found:
                found.append(name)

        return found

class PerformanceMonitor:
    """Tracks recent query latencies plus cache hit/miss counters."""

    def __init__(self):
        # Recent latency samples: {'time': float, 'timestamp': datetime}
        self.query_times = []
        self.cache_hits = 0
        self.cache_misses = 0

    def record_query_time(self, query_time: float):
        """Append one latency sample, retaining only the newest 1000."""
        self.query_times.append({
            'time': query_time,
            'timestamp': datetime.now()
        })
        # Bound memory: drop everything but the last 1000 samples.
        if len(self.query_times) > 1000:
            del self.query_times[:-1000]

    def record_cache_hit(self):
        """Count one cache hit."""
        self.cache_hits += 1

    def record_cache_miss(self):
        """Count one cache miss."""
        self.cache_misses += 1

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Return aggregate latency metrics and the cache hit rate (percent)."""
        samples = [entry['time'] for entry in self.query_times]

        if not samples:
            # Nothing recorded yet: every metric is zero.
            return {
                'avg_query_time': 0,
                'max_query_time': 0,
                'min_query_time': 0,
                'total_queries': 0,
                'cache_hit_rate': 0
            }

        lookups = self.cache_hits + self.cache_misses
        hit_rate = self.cache_hits / lookups * 100 if lookups > 0 else 0

        return {
            'avg_query_time': sum(samples) / len(samples),
            'max_query_time': max(samples),
            'min_query_time': min(samples),
            'total_queries': len(samples),
            'cache_hit_rate': hit_rate
        }


# Module-level singletons shared by the decorator and query helpers below.
query_optimizer = QueryOptimizationService()
performance_monitor = PerformanceMonitor()


def monitor_query_performance(func):
    """Decorator that records the execution time of an async callable.

    Wraps *func* (must be a coroutine function) so every call's duration is
    recorded in the module-level ``performance_monitor`` — even when the call
    raises. If the callable carries ``__query__``/``__params__`` attributes,
    the timing is also fed to ``query_optimizer``.

    Fix: apply ``functools.wraps`` so the wrapper preserves the decorated
    function's ``__name__``/``__doc__``/signature metadata (the original
    wrapper clobbered them, breaking introspection and logging).
    """
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()

        try:
            return await func(*args, **kwargs)
        finally:
            # Record timing on both success and failure paths.
            execution_time = time.time() - start_time
            performance_monitor.record_query_time(execution_time)

            # Optional query metadata attached by the caller.
            if hasattr(func, '__query__'):
                query_optimizer.log_query_performance(
                    func.__query__,
                    getattr(func, '__params__', ()),
                    execution_time
                )

    return wrapper


class OptimizedQuery:
    """Session-bound query helper with caching, pagination, batching and
    per-query performance logging.

    All SQL runs through the wrapped ``Session``; timings are fed to the
    module-level ``query_optimizer`` and ``performance_monitor``.
    """

    def __init__(self, db: Session):
        self.db = db
        self.optimizer = query_optimizer      # shared QueryOptimizationService
        self.monitor = performance_monitor    # shared PerformanceMonitor

    async def execute_cached_query(
        self,
        query: str,
        params: tuple = (),
        ttl: int = 1800,
        force_refresh: bool = False
    ) -> Any:
        """Execute *query*, serving the result from the query cache when possible.

        Args:
            query: SQL text to execute.
            params: bound parameters forwarded to ``Session.execute``.
            ttl: intended cache lifetime in seconds.
                NOTE(review): accepted but NOT forwarded to
                ``query_cache.set_query_result`` — confirm the cache API and
                wire it through, or retire the parameter.
            force_refresh: when True, skip the cache lookup and re-execute.

        Returns:
            The cached result, or the fresh ``fetchall()`` rows.

        Raises:
            Exception: re-raises any database error after logging it.
        """
        if not force_refresh:
            cached_result = await query_cache.get_query_result(query, params)
            if cached_result is not None:
                self.monitor.record_cache_hit()
                return cached_result

        self.monitor.record_cache_miss()

        start_time = time.time()
        try:
            result = self.db.execute(text(query), params).fetchall()
            execution_time = time.time() - start_time

            # Record timing for the optimizer's statistics.
            self.optimizer.log_query_performance(query, params, execution_time)

            # Cache the freshly fetched rows for subsequent calls.
            await query_cache.set_query_result(query, params, result)

            return result
        except Exception as e:
            logger.error(f"执行查询失败: {e}")
            raise

    async def execute_paginated_query(
        self,
        query: str,
        params: tuple = (),
        page: int = 1,
        page_size: int = 20,
        count_query: Optional[str] = None
    ) -> Dict[str, Any]:
        """Run *query* with LIMIT/OFFSET pagination.

        Args:
            query: base SQL (without LIMIT/OFFSET).
            params: bound parameters for both the count and the page query.
            page: 1-based page number; values < 1 are clamped to 1.
            page_size: rows per page; values < 1 are clamped to 1.
            count_query: optional explicit COUNT query; when omitted, one is
                derived by wrapping *query* in a subquery.

        Returns:
            Dict with ``data``, ``total``, ``page``, ``page_size`` and
            ``total_pages``.

        Raises:
            Exception: re-raises any database error after logging it.
        """
        try:
            # Fix: coerce and clamp paging values. The values are interpolated
            # into the SQL text below, so non-integer input was an injection
            # vector, and page < 1 produced a negative (invalid) OFFSET.
            page = max(1, int(page))
            page_size = max(1, int(page_size))

            # Total row count — explicit count query, or derived subquery.
            if count_query:
                total = self.db.execute(text(count_query), params).scalar()
            else:
                count_query = f"SELECT COUNT(*) FROM ({query}) as subquery"
                total = self.db.execute(text(count_query), params).scalar()

            # Fetch the requested page.
            offset = (page - 1) * page_size
            paginated_query = f"{query} LIMIT {page_size} OFFSET {offset}"

            start_time = time.time()
            result = self.db.execute(text(paginated_query), params).fetchall()
            execution_time = time.time() - start_time

            self.optimizer.log_query_performance(paginated_query, params, execution_time)

            return {
                'data': result,
                'total': total,
                'page': page,
                'page_size': page_size,
                'total_pages': (total + page_size - 1) // page_size
            }
        except Exception as e:
            logger.error(f"执行分页查询失败: {e}")
            raise

    async def execute_batch_query(
        self,
        queries: List[tuple],
        use_cache: bool = True
    ) -> List[Any]:
        """Execute a batch of ``(query, params)`` pairs sequentially.

        Args:
            queries: list of ``(sql, params)`` tuples.
            use_cache: route each query through ``execute_cached_query`` when
                True; otherwise execute directly (still recording timings).

        Returns:
            One result entry per input query, in order.
        """
        results = []

        for query, params in queries:
            if use_cache:
                result = await self.execute_cached_query(query, params)
            else:
                start_time = time.time()
                result = self.db.execute(text(query), params).fetchall()
                execution_time = time.time() - start_time
                self.optimizer.log_query_performance(query, params, execution_time)

            results.append(result)

        return results

    def build_optimized_select(
        self,
        table_name: str,
        columns: List[str] = None,
        where_conditions: List[str] = None,
        join_conditions: List[str] = None,
        group_by: List[str] = None,
        order_by: List[str] = None,
        limit: int = None,
        offset: int = None
    ) -> str:
        """Assemble a SELECT statement from its clauses.

        NOTE(review): all clause fragments are interpolated verbatim — callers
        must pass trusted identifiers/conditions only, never raw user input.

        Returns:
            The assembled SQL string.
        """
        # SELECT list: explicit columns or everything.
        select_clause = ", ".join(columns) if columns else "*"
        query = f"SELECT {select_clause} FROM {table_name}"

        if join_conditions:
            query += " " + " ".join(join_conditions)

        if where_conditions:
            query += " WHERE " + " AND ".join(where_conditions)

        if group_by:
            query += " GROUP BY " + ", ".join(group_by)

        if order_by:
            query += " ORDER BY " + ", ".join(order_by)

        # Fix: force LIMIT/OFFSET to integers so a string can never smuggle
        # extra SQL into the statement (no-op for the int inputs the
        # signature documents).
        if limit is not None:
            query += f" LIMIT {int(limit)}"
            if offset is not None:
                query += f" OFFSET {int(offset)}"

        return query

    async def get_table_statistics(self, table_name: str) -> Dict[str, Any]:
        """Collect row count, estimated size and index info for *table_name*.

        NOTE(review): *table_name* is interpolated into SQL and a PRAGMA
        (which cannot be parameterized) — only call with trusted, known table
        names. The 8192-bytes-per-row size estimate and ``PRAGMA index_list``
        are SQLite-specific.

        Returns:
            Statistics dict, or ``{}`` on failure.
        """
        try:
            # Row count.
            count_query = f"SELECT COUNT(*) as total_rows FROM {table_name}"
            total_rows = self.db.execute(text(count_query)).scalar()

            # Rough size estimate (SQLite page-size heuristic).
            size_query = f"SELECT COUNT(*) * 8192 as estimated_size FROM {table_name}"
            estimated_size = self.db.execute(text(size_query)).scalar()

            # Index metadata (SQLite-specific PRAGMA).
            index_query = f"PRAGMA index_list({table_name})"
            indexes = self.db.execute(text(index_query)).fetchall()

            return {
                'table_name': table_name,
                'total_rows': total_rows,
                'estimated_size': estimated_size,
                'index_count': len(indexes),
                'indexes': [{'name': idx[1], 'unique': bool(idx[2])} for idx in indexes]
            }
        except Exception as e:
            logger.error(f"获取表统计信息失败: {e}")
            return {}


# Factory for session-bound query helpers
def get_optimized_query(db: Session) -> OptimizedQuery:
    """Return an OptimizedQuery helper bound to the given database session."""
    helper = OptimizedQuery(db)
    return helper


async def get_database_performance_report(db: Session) -> Dict[str, Any]:
    """Assemble a database performance report.

    Combines query statistics, runtime metrics, cache statistics and
    statistics for the main application tables. The cache section is gathered
    defensively so its failure no longer blanks the whole report.

    Args:
        db: database session used for per-table statistics.

    Returns:
        The report dict; ``{}`` only if report assembly itself fails.
    """
    try:
        report = {
            'query_statistics': query_optimizer.get_query_statistics(),
            'performance_metrics': performance_monitor.get_performance_metrics(),
            'cache_statistics': {},
            'table_statistics': {}
        }

        # FIXME: the original code awaited `cache_statistics()`, a name that
        # is neither defined nor imported in this module, so the resulting
        # NameError made the outer handler return {} on EVERY call. Guard the
        # call so the rest of the report survives; presumably this should be
        # a method on `cache_service` — confirm against the cache service API.
        try:
            report['cache_statistics'] = await cache_statistics()
        except Exception as e:
            logger.warning(f"获取缓存统计信息失败: {e}")

        # Per-table statistics for the main tables; failures logged per table.
        tables = ['petition_data', 'analysis_result', 'analysis_task', 'report', 'user']
        optimized_query = get_optimized_query(db)

        for table in tables:
            try:
                stats = await optimized_query.get_table_statistics(table)
                report['table_statistics'][table] = stats
            except Exception as e:
                logger.warning(f"获取表{table}统计信息失败: {e}")

        return report
    except Exception as e:
        logger.error(f"生成数据库性能报告失败: {e}")
        return {}