"""
文件名: database_optimization.py
描述: 数据库查询优化工具

本模块包含以下主要功能：
1. 数据库连接池优化
2. 查询缓存策略
3. 批量操作优化
4. 索引建议和分析

依赖模块:
   - sqlalchemy: ORM和查询优化
   - asyncio: 异步操作支持
   - core: 配置和日志模块

使用示例:
   >>> @optimize_query()
   ... async def get_user_data(user_id):
   ...     return await session.get(User, user_id)

注意事项:
   - 批量操作要控制单次数据量
   - 索引要根据查询模式创建
   - 连接池配置要合理
   - 事务要及时提交或回滚

作者: AI助手
创建日期: 2024-06-27
最后修改: 2024-06-27
版本: 1.0.0
"""

import time
import asyncio
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional, Callable, Union
from functools import wraps
from collections import defaultdict

from sqlalchemy import text, inspect
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload, joinedload
from sqlalchemy.sql import Select

from src.core.logger import get_logger
from src.core.database import get_db_session
from src.utils.cache import cache_result
from src.utils.performance import performance_monitor

# Module-level logger bound to this module's dotted name.
logger = get_logger(__name__)


class QueryOptimizer:
    """Collects per-query timing statistics and derives optimization hints.

    Queries are keyed by ``hash(query)``. Every call to :meth:`analyze_query`
    appends a record; queries slower than 1 second are additionally tracked
    as slow queries and get heuristic suggestions attached.
    """

    def __init__(self):
        # query_hash -> list of analysis records for that query.
        self.query_stats = defaultdict(list)
        # Analysis records for queries that took longer than 1 second.
        self.slow_queries = []
        # query_hash -> accumulated suggestion strings.
        self.optimization_suggestions = defaultdict(list)

    def analyze_query(self, query: str, execution_time: float, result_count: int = 0):
        """Record one query execution and flag it if it is slow.

        Args:
            query: SQL text (or an identifying name) of the executed query.
            execution_time: Wall-clock duration in seconds.
            result_count: Number of rows returned, if known.
        """
        query_hash = hash(query)

        analysis = {
            # Truncate long SQL so reports stay readable.
            'query': query[:200] + "..." if len(query) > 200 else query,
            'execution_time': execution_time,
            'result_count': result_count,
            'timestamp': datetime.utcnow(),
            'query_hash': query_hash,
        }

        self.query_stats[query_hash].append(analysis)

        # Anything above 1 second counts as a slow query.
        if execution_time > 1.0:
            self.slow_queries.append(analysis)
            self._generate_optimization_suggestions(query, execution_time, result_count)

    def _generate_optimization_suggestions(self, query: str, execution_time: float, result_count: int):
        """Derive heuristic optimization suggestions for a slow query."""
        suggestions = []
        query_lower = query.lower()

        # Possible missing index on WHERE columns.
        if 'where' in query_lower and execution_time > 2.0:
            suggestions.append("考虑在WHERE条件的字段上添加索引")

        # SELECT * fetches columns the caller may not need.
        if 'select *' in query_lower:
            suggestions.append("避免使用SELECT *，只选择需要的字段")

        # Large unbounded result sets.
        if result_count > 1000 and 'limit' not in query_lower:
            suggestions.append("大结果集查询建议添加LIMIT限制")

        # JOIN tuning.
        if 'join' in query_lower and execution_time > 3.0:
            suggestions.append("检查JOIN条件是否有适当的索引")
            suggestions.append("考虑使用INNER JOIN替代LEFT JOIN（如果可能）")

        # Subquery detection. BUGFIX: the original used a convoluted
        # conditional expression that only inspected the segment after the
        # FIRST '(' — a subquery appearing after a later parenthesis was
        # missed. Check every parenthesized segment instead.
        if any('select' in segment for segment in query_lower.split('(')[1:]):
            suggestions.append("考虑将子查询重写为JOIN")

        # ORDER BY without a supporting index.
        if 'order by' in query_lower and execution_time > 1.5:
            suggestions.append("在ORDER BY字段上添加索引")

        if suggestions:
            query_hash = hash(query)
            self.optimization_suggestions[query_hash].extend(suggestions)

    def get_query_report(self) -> Dict[str, Any]:
        """Summarize collected statistics.

        Returns:
            A dict with totals, average/max execution times, the five slowest
            queries and the five most frequent queries (each paired with any
            accumulated suggestions), or a message dict when no statistics
            have been recorded yet.
        """
        if not self.query_stats:
            return {"message": "没有查询统计数据"}

        # Flatten all per-query record lists into one list.
        all_queries = []
        for queries in self.query_stats.values():
            all_queries.extend(queries)

        execution_times = [q['execution_time'] for q in all_queries]

        # Five slowest individual executions.
        slowest_queries = sorted(all_queries, key=lambda x: x['execution_time'], reverse=True)[:5]

        # Five most frequently seen query hashes.
        query_frequency = {}
        for query_hash, queries in self.query_stats.items():
            query_frequency[query_hash] = len(queries)

        most_frequent = sorted(query_frequency.items(), key=lambda x: x[1], reverse=True)[:5]

        return {
            "total_queries": len(all_queries),
            "avg_execution_time": sum(execution_times) / len(execution_times),
            "max_execution_time": max(execution_times),
            "slow_queries_count": len(self.slow_queries),
            "slowest_queries": [
                {
                    "query": q["query"],
                    "execution_time": q["execution_time"],
                    "suggestions": self.optimization_suggestions.get(q["query_hash"], [])
                }
                for q in slowest_queries
            ],
            "most_frequent_queries": [
                {
                    "query_hash": query_hash,
                    "frequency": frequency,
                    "suggestions": self.optimization_suggestions.get(query_hash, [])
                }
                for query_hash, frequency in most_frequent
            ]
        }


class BatchOperationManager:
    """Chunked insert/update/delete helpers for an ``AsyncSession``.

    All operations process their input in chunks of ``batch_size``, flush
    (without committing) per chunk, and roll back + re-raise on failure;
    the caller owns the final commit.
    """

    def __init__(self, batch_size: int = 1000):
        # Maximum number of rows handled per flush/statement.
        self.batch_size = batch_size

    async def batch_insert(self, session: AsyncSession, model_class, data_list: List[Dict]):
        """Insert ``data_list`` rows in chunks.

        Args:
            session: Active async session.
            model_class: ORM model to instantiate from each dict.
            data_list: One kwargs dict per row.

        Returns:
            The list of created model instances (flushed, so IDs are set).
        """
        if not data_list:
            return []

        inserted_objects = []

        for start in range(0, len(data_list), self.batch_size):
            batch = data_list[start:start + self.batch_size]

            try:
                objects = [model_class(**item) for item in batch]
                session.add_all(objects)
                # Flush to obtain generated primary keys without committing.
                await session.flush()

                inserted_objects.extend(objects)

                logger.info("批量插入完成", model=model_class.__name__, count=len(batch))

            except Exception as e:
                logger.error(f"批量插入失败: {e}", model=model_class.__name__)
                await session.rollback()
                raise

        return inserted_objects

    async def batch_update(self, session: AsyncSession, model_class, updates: List[Dict]):
        """Apply per-row updates in chunks.

        Each dict in ``updates`` must contain an ``id`` key identifying the
        row; the remaining keys become the updated column values.

        Returns:
            The number of rows for which an UPDATE statement was issued.
        """
        if not updates:
            return 0

        updated_count = 0

        for start in range(0, len(updates), self.batch_size):
            batch = updates[start:start + self.batch_size]

            try:
                for update_data in batch:
                    # BUGFIX: copy before popping 'id' — the original popped
                    # in place and thereby mutated the caller's dicts.
                    values = dict(update_data)
                    obj_id = values.pop('id')
                    await session.execute(
                        model_class.__table__.update()
                        .where(model_class.id == obj_id)
                        .values(**values)
                    )

                updated_count += len(batch)
                logger.info("批量更新完成", model=model_class.__name__, count=len(batch))

            except Exception as e:
                logger.error(f"批量更新失败: {e}", model=model_class.__name__)
                await session.rollback()
                raise

        return updated_count

    async def batch_delete(self, session: AsyncSession, model_class, ids: List[Any]):
        """Delete rows whose primary keys are in ``ids``, in chunks.

        Returns:
            Total number of rows actually deleted (summed ``rowcount``).
        """
        if not ids:
            return 0

        deleted_count = 0

        for start in range(0, len(ids), self.batch_size):
            batch_ids = ids[start:start + self.batch_size]

            try:
                result = await session.execute(
                    model_class.__table__.delete()
                    .where(model_class.id.in_(batch_ids))
                )

                deleted_count += result.rowcount
                logger.info("批量删除完成", model=model_class.__name__, count=result.rowcount)

            except Exception as e:
                logger.error(f"批量删除失败: {e}", model=model_class.__name__)
                await session.rollback()
                raise

        return deleted_count


class IndexAnalyzer:
    """Inspects PostgreSQL indexes for a table and suggests new ones."""

    def __init__(self):
        # NOTE(review): missing_indexes / unused_indexes are never populated
        # by this class yet; kept for interface compatibility.
        self.missing_indexes = []
        self.unused_indexes = []
        # All suggestions produced so far, across every analyzed table.
        self.suggestions = []

    async def analyze_table_indexes(self, session: AsyncSession, table_name: str):
        """Report existing indexes for ``table_name`` plus fresh suggestions.

        BUGFIX: the original returned the analyzer-wide ``self.suggestions``
        list, so repeated calls leaked suggestions from previously analyzed
        tables into every later report. Only this call's suggestions are
        returned now (they are still accumulated on ``self.suggestions``).

        Returns:
            A dict with the table name, its existing indexes, and the
            suggestions generated by this call — or ``{"error": ...}``.
        """
        try:
            # Existing indexes from the PostgreSQL catalog.
            indexes_query = text("""
                SELECT 
                    indexname,
                    indexdef,
                    schemaname,
                    tablename
                FROM pg_indexes 
                WHERE tablename = :table_name
            """)

            result = await session.execute(indexes_query, {"table_name": table_name})
            indexes = result.fetchall()

            # Query statistics (requires the pg_stat_statements extension).
            stats_query = text("""
                SELECT 
                    query,
                    calls,
                    total_time,
                    mean_time
                FROM pg_stat_statements 
                WHERE query LIKE :pattern
                LIMIT 20
            """)

            table_suggestions: List[str] = []
            try:
                result = await session.execute(stats_query, {"pattern": f"%{table_name}%"})
                query_stats = result.fetchall()

                table_suggestions = self._analyze_query_patterns(table_name, query_stats)

            except Exception:
                # Best-effort: the extension may not be installed.
                logger.info(f"pg_stat_statements不可用，跳过查询模式分析")

            return {
                "table_name": table_name,
                "indexes": [
                    {
                        "name": idx.indexname,
                        "definition": idx.indexdef,
                        "schema": idx.schemaname
                    }
                    for idx in indexes
                ],
                "suggestions": table_suggestions
            }

        except Exception as e:
            logger.error(f"索引分析失败: {e}", table=table_name)
            return {"error": str(e)}

    def _analyze_query_patterns(self, table_name: str, query_stats) -> List[str]:
        """Suggest single-column indexes from observed query text.

        Returns the suggestions generated by THIS call; they are also
        appended to the analyzer-wide ``self.suggestions``.
        """
        new_suggestions: List[str] = []
        for stat in query_stats:
            query = stat.query.lower()

            # Very simple substring matching; a real implementation would
            # parse the WHERE clause properly.
            if 'where' in query and table_name in query:
                if 'user_id' in query:
                    new_suggestions.append(f"考虑在{table_name}.user_id上添加索引")

                if 'created_at' in query:
                    new_suggestions.append(f"考虑在{table_name}.created_at上添加索引")

                if 'status' in query:
                    new_suggestions.append(f"考虑在{table_name}.status上添加索引")

        self.suggestions.extend(new_suggestions)
        return new_suggestions

    async def suggest_composite_indexes(self, session: AsyncSession, table_name: str):
        """Return CREATE INDEX statements for commonly useful composite indexes.

        ``session`` is currently unused but kept for interface compatibility.
        (The original wrapped this in a try/except that could never fire —
        nothing here raises — so the dead handler was removed.)
        """
        # Column pairs that frequently appear together in filters, e.g. a
        # user-scoped table usually benefits from (user_id, created_at).
        common_patterns = [
            ("user_id", "created_at"),
            ("user_id", "status"),
            ("status", "updated_at"),
        ]

        return [
            f"CREATE INDEX idx_{table_name}_{'_'.join(pattern)} ON {table_name} ({', '.join(pattern)});"
            for pattern in common_patterns
        ]


class ConnectionPoolOptimizer:
    """Reports SQLAlchemy connection-pool usage and tuning hints."""

    def __init__(self):
        # NOTE(review): these counters are not updated anywhere in this
        # module yet; kept for interface compatibility.
        self.connection_stats = {
            'active_connections': 0,
            'peak_connections': 0,
            'connection_errors': 0,
            'avg_query_time': 0,
        }

    def analyze_pool_usage(self, engine) -> Dict[str, Any]:
        """Snapshot the engine's pool counters and attach recommendations.

        Returns:
            A dict of pool counters plus a "recommendations" list, or
            ``{"error": ...}`` when the pool cannot be inspected.
        """
        try:
            pool = engine.pool

            return {
                "pool_size": pool.size(),
                "checked_in": pool.checkedin(),
                "checked_out": pool.checkedout(),
                "overflow": pool.overflow(),
                "invalid": pool.invalid(),
                "recommendations": self._get_pool_recommendations(pool)
            }

        except Exception as e:
            logger.error(f"连接池分析失败: {e}")
            return {"error": str(e)}

    def _get_pool_recommendations(self, pool) -> List[str]:
        """Derive tuning hints from the pool's current counters."""
        recommendations = []

        pool_size = pool.size()
        # BUGFIX: guard against a zero-sized pool, which previously raised
        # ZeroDivisionError here.
        if pool_size > 0:
            utilization = (pool.checkedout() + pool.overflow()) / pool_size * 100
        else:
            utilization = 0.0

        if utilization > 80:
            recommendations.append("连接池使用率过高，建议增加pool_size")

        if pool.overflow() > pool_size * 0.5:
            recommendations.append("overflow连接较多，建议增加max_overflow")

        if pool.invalid() > 0:
            recommendations.append("存在无效连接，检查网络连接稳定性")

        return recommendations


# Global singletons shared by the helper functions and decorator below.
query_optimizer = QueryOptimizer()
batch_manager = BatchOperationManager()
index_analyzer = IndexAnalyzer()
pool_optimizer = ConnectionPoolOptimizer()


def optimize_query(cache_ttl: int = 300, enable_profiling: bool = True):
    """Decorator factory: cache and profile an async query function.

    Args:
        cache_ttl: Cache lifetime in seconds; 0 or negative disables caching.
        enable_profiling: When True, record timing via the global
            ``query_optimizer`` and ``performance_monitor``.

    Returns:
        A decorator for async callables; the wrapped function keeps its
        original signature.
    """
    def decorator(func: Callable) -> Callable:
        # BUGFIX: apply the cache decorator ONCE at decoration time. The
        # original re-ran cache_result(...)(func) on every single call,
        # rebuilding the cached wrapper per invocation.
        if cache_ttl > 0:
            cached_func = cache_result(ttl=cache_ttl, key_prefix="db_query")(func)
        else:
            cached_func = func

        @wraps(func)
        async def wrapper(*args, **kwargs):
            start_time = time.time()

            try:
                result = await cached_func(*args, **kwargs)

                execution_time = time.time() - start_time

                if enable_profiling:
                    result_count = len(result) if isinstance(result, (list, tuple)) else 1
                    # NOTE(review): func.__name__ stands in for the real SQL
                    # text here, as in the original implementation.
                    query_optimizer.analyze_query(
                        query=func.__name__,
                        execution_time=execution_time,
                        result_count=result_count
                    )

                    performance_monitor.record_db_metric(
                        query_type=func.__name__,
                        query_time=execution_time
                    )

                return result

            except Exception as e:
                execution_time = time.time() - start_time
                logger.error(f"查询执行失败: {e}", function=func.__name__, time=execution_time)
                raise

        return wrapper
    return decorator


async def optimize_eager_loading(query: Select, relationships: List[str]) -> Select:
    """Attach eager-loading options for the given relationship paths.

    Dotted paths ("a.b") are loaded via ``selectinload`` (separate SELECT);
    flat names via ``joinedload`` (JOIN in the main query).
    NOTE(review): string arguments to these loaders are version-dependent in
    SQLAlchemy — confirm against the pinned version.
    """
    for rel_path in relationships:
        loader = selectinload if '.' in rel_path else joinedload
        query = query.options(loader(rel_path))

    return query


async def create_suggested_indexes(session: AsyncSession, table_name: str):
    """Best-effort execution of the suggested CREATE INDEX statements.

    Failures are logged and skipped rather than raised, so one bad statement
    does not prevent the remaining indexes from being created.

    Returns:
        The list of statements that executed successfully.
    """
    statements = await index_analyzer.suggest_composite_indexes(session, table_name)

    created_indexes = []
    for statement in statements:
        try:
            await session.execute(text(statement))
            created_indexes.append(statement)
            logger.info(f"索引创建成功", sql=statement)
        except Exception as e:
            logger.warning(f"索引创建失败: {e}", sql=statement)

    return created_indexes


async def analyze_database_performance(session: AsyncSession) -> Dict[str, Any]:
    """Combine global query statistics with per-table index reports.

    Returns:
        A dict with a timestamp, the query-optimizer report, the 10 most
        recent slow queries, and one ``<table>_indexes`` entry per common
        table that could be analyzed.
    """
    report: Dict[str, Any] = {
        "timestamp": datetime.utcnow().isoformat(),
        "query_analysis": query_optimizer.get_query_report(),
        "slow_queries": query_optimizer.slow_queries[-10:],  # last 10 only
        "optimization_suggestions": []
    }

    # Index health for the tables the application queries most.
    common_tables = ["users", "channels", "videos", "categories", "user_channels"]

    for table in common_tables:
        try:
            report[f"{table}_indexes"] = await index_analyzer.analyze_table_indexes(session, table)
        except Exception as e:
            # Best-effort: a failing table must not abort the whole report.
            logger.warning(f"表{table}索引分析失败: {e}")

    return report


class QueryCache:
    """Query result cache.

    Wraps a query callable with the project-level ``cache_result`` decorator
    and tracks simple cache statistics.
    """
    
    def __init__(self, default_ttl: int = 300):
        # Default TTL in seconds.
        # NOTE(review): default_ttl is not used by cached_query, whose
        # decorator hard-codes ttl=300 — confirm this is intended.
        self.default_ttl = default_ttl
        # NOTE(review): "hits"/"misses"/"errors" are never incremented in
        # this class, so get_cache_stats reports a 0% hit rate unless these
        # counters are updated elsewhere.
        self.cache_stats = {
            "hits": 0,
            "misses": 0,
            "sets": 0,
            "errors": 0
        }
    
    @cache_result(ttl=300, key_prefix="query_cache")
    async def cached_query(self, query_key: str, query_func: Callable, *args, **kwargs):
        """Run ``query_func`` through the cache layer.

        NOTE(review): "sets" counts every execution of this body; whether a
        cache hit skips the body depends on cache_result — verify there.
        """
        self.cache_stats["sets"] += 1
        return await query_func(*args, **kwargs)
    
    def get_cache_stats(self) -> Dict[str, Any]:
        """Return raw counters plus total request count and hit rate (%)."""
        total_requests = self.cache_stats["hits"] + self.cache_stats["misses"]
        # Guard against division by zero when nothing has been recorded.
        hit_rate = (self.cache_stats["hits"] / total_requests * 100) if total_requests > 0 else 0
        
        return {
            **self.cache_stats,
            "total_requests": total_requests,
            "hit_rate": round(hit_rate, 2)
        }


# 全局查询缓存实例
query_cache = QueryCache()