"""
数据库性能优化服务

提供数据库索引优化、查询性能优化和读写分离机制，包括：
- 数据库索引优化
- 查询性能分析和优化
- 读写分离机制
- 连接池管理
- 查询缓存
"""
import asyncio
import json
import time
import hashlib
from typing import Dict, List, Any, Optional, Tuple, Union
from datetime import datetime, timedelta
from dataclasses import dataclass, field
from collections import defaultdict, deque
from enum import Enum
import logging

try:
    import asyncpg
    import aiopg
    POSTGRES_AVAILABLE = True
except ImportError:
    POSTGRES_AVAILABLE = False

try:
    import aiomysql
    MYSQL_AVAILABLE = True
except ImportError:
    MYSQL_AVAILABLE = False

try:
    import aiosqlite
    SQLITE_AVAILABLE = True
except ImportError:
    SQLITE_AVAILABLE = False


class DatabaseType(Enum):
    """Supported database backends; values match config 'type' strings."""
    POSTGRESQL = "postgresql"
    MYSQL = "mysql"
    SQLITE = "sqlite"


class QueryType(Enum):
    """SQL statement categories, detected from the leading keyword."""
    SELECT = "select"
    INSERT = "insert"
    UPDATE = "update"
    DELETE = "delete"
    CREATE = "create"
    ALTER = "alter"
    DROP = "drop"


@dataclass
class QueryStats:
    """Running execution statistics for one normalized query."""
    query_hash: str  # 16-char MD5 prefix of the normalized query text
    query_type: QueryType
    execution_count: int = 0
    total_execution_time: float = 0.0  # seconds, cumulative
    average_execution_time: float = 0.0  # maintained as total / count on each record
    min_execution_time: float = float('inf')  # inf until the first execution is recorded
    max_execution_time: float = 0.0
    last_executed: Optional[datetime] = None
    error_count: int = 0  # executions that reported an error message
    rows_affected_total: int = 0


@dataclass
class IndexRecommendation:
    """A suggested index with rough, heuristic cost/benefit numbers."""
    table_name: str
    columns: List[str]
    index_type: str = "btree"
    reason: str = ""  # human-readable justification for the suggestion
    estimated_benefit: float = 0.0  # heuristic score, not a measured speedup
    creation_cost: float = 0.0
    maintenance_cost: float = 0.0


@dataclass
class DatabaseConnection:
    """Wrapper pairing a driver connection with pool bookkeeping."""
    connection_id: str
    database_type: DatabaseType
    connection: Any  # underlying driver connection object (asyncpg/aiomysql/aiosqlite)
    is_read_only: bool = False
    created_at: datetime = field(default_factory=datetime.now)
    last_used: datetime = field(default_factory=datetime.now)
    query_count: int = 0  # number of times this connection was borrowed
    is_active: bool = True  # False tells the pool to close instead of reuse


class ConnectionPool:
    """Async database connection pool with separate read and write queues.

    Keeps two deques of idle connections (read-only vs read-write), tracks
    every live connection in ``active_connections``, and runs a background
    task that closes connections idle for more than 10 minutes.
    """

    def __init__(self, database_type: "DatabaseType", connection_params: Dict[str, Any],
                 min_connections: int = 5, max_connections: int = 20):
        self.database_type = database_type
        self.connection_params = connection_params
        self.min_connections = min_connections
        self.max_connections = max_connections

        # Idle connection queues, split by access mode.
        self.read_connections: deque = deque()
        self.write_connections: deque = deque()
        # Every connection created and not yet closed, keyed by id.
        self.active_connections: Dict[str, "DatabaseConnection"] = {}

        # Monotonic counter so connection ids stay unique even when several
        # connections are created within the same millisecond.
        self._conn_counter = 0

        # Pool-level counters exposed through get_stats().
        self.stats = {
            'total_connections_created': 0,
            'total_connections_closed': 0,
            'current_active_connections': 0,
            'peak_connections': 0,
            'connection_wait_time_total': 0.0,
            'connection_requests': 0
        }

        # Lifecycle state.
        self.is_initialized = False
        self.cleanup_task: Optional[asyncio.Task] = None

    async def initialize(self):
        """Create the initial connections and start the idle-cleanup task.

        NOTE(review): one read and one write connection are created per
        iteration, so the pool starts with ``2 * min_connections``
        connections — confirm that is the intended sizing.
        """
        if self.is_initialized:
            return

        for _ in range(self.min_connections):
            read_conn = await self._create_connection(is_read_only=True)
            if read_conn:
                self.read_connections.append(read_conn)

            write_conn = await self._create_connection(is_read_only=False)
            if write_conn:
                self.write_connections.append(write_conn)

        self.cleanup_task = asyncio.create_task(self._cleanup_loop())

        self.is_initialized = True
        print(f"✅ 数据库连接池已初始化 ({self.database_type.value})")

    async def get_connection(self, read_only: bool = True) -> Optional["DatabaseConnection"]:
        """Borrow a connection, waiting until one becomes available.

        Fix: the original retried by calling itself recursively, which grew
        the call stack without bound and counted every retry as a fresh
        request in the statistics; this version waits in a loop and counts
        each caller exactly once. Wait time is now also recorded for newly
        created connections, not only for pooled ones.
        """
        start_time = time.time()
        self.stats['connection_requests'] += 1

        try:
            pool = self.read_connections if read_only else self.write_connections

            while True:
                # Fast path: reuse an idle pooled connection.
                if pool:
                    connection = pool.popleft()
                    connection.last_used = datetime.now()
                    connection.query_count += 1
                    self.stats['connection_wait_time_total'] += time.time() - start_time
                    return connection

                # Grow the pool while under the configured ceiling.
                if len(self.active_connections) < self.max_connections:
                    new_connection = await self._create_connection(read_only)
                    if new_connection:
                        self.stats['connection_wait_time_total'] += time.time() - start_time
                        return new_connection

                # Pool exhausted: yield briefly, then re-check.
                await asyncio.sleep(0.1)

        except Exception as e:
            print(f"获取数据库连接失败: {e}")
            return None

    async def return_connection(self, connection: "DatabaseConnection"):
        """Give a borrowed connection back to its pool, or close it if dead."""
        try:
            if connection.is_active:
                if connection.is_read_only:
                    self.read_connections.append(connection)
                else:
                    self.write_connections.append(connection)
            else:
                # The connection was marked dead while borrowed; dispose of it.
                await self._close_connection(connection)

        except Exception as e:
            print(f"归还数据库连接失败: {e}")

    async def _create_connection(self, is_read_only: bool = True) -> Optional["DatabaseConnection"]:
        """Open a new driver connection and register it with the pool.

        Returns None (after logging) when the driver is unavailable or the
        connection attempt fails.
        """
        try:
            # Fix: the original id was the bare millisecond timestamp, so two
            # connections created in the same millisecond collided as keys in
            # ``active_connections``; the counter suffix makes ids unique.
            self._conn_counter += 1
            connection_id = f"conn_{int(time.time() * 1000)}_{self._conn_counter}"

            if self.database_type == DatabaseType.POSTGRESQL and POSTGRES_AVAILABLE:
                conn = await asyncpg.connect(**self.connection_params)

                # Enforce read-only semantics at the session level.
                if is_read_only:
                    await conn.execute("SET default_transaction_read_only = on")

            elif self.database_type == DatabaseType.MYSQL and MYSQL_AVAILABLE:
                # NOTE(review): MySQL connections are never switched to a
                # read-only mode here — confirm whether that is intentional.
                conn = await aiomysql.connect(**self.connection_params)

            elif self.database_type == DatabaseType.SQLITE and SQLITE_AVAILABLE:
                conn = await aiosqlite.connect(self.connection_params.get('database', ':memory:'))

            else:
                raise ValueError(f"不支持的数据库类型: {self.database_type}")

            db_connection = DatabaseConnection(
                connection_id=connection_id,
                database_type=self.database_type,
                connection=conn,
                is_read_only=is_read_only
            )

            self.active_connections[connection_id] = db_connection
            self.stats['total_connections_created'] += 1
            self.stats['current_active_connections'] = len(self.active_connections)

            if self.stats['current_active_connections'] > self.stats['peak_connections']:
                self.stats['peak_connections'] = self.stats['current_active_connections']

            return db_connection

        except Exception as e:
            print(f"创建数据库连接失败: {e}")
            return None

    async def _close_connection(self, connection: "DatabaseConnection"):
        """Close the driver connection and drop it from the registry."""
        try:
            if connection.connection:
                await connection.connection.close()

            if connection.connection_id in self.active_connections:
                del self.active_connections[connection.connection_id]

            self.stats['total_connections_closed'] += 1
            self.stats['current_active_connections'] = len(self.active_connections)

        except Exception as e:
            print(f"关闭数据库连接失败: {e}")

    async def _cleanup_loop(self):
        """Background task: sweep idle connections once a minute."""
        while True:
            try:
                await asyncio.sleep(60)
                await self._cleanup_idle_connections()
            except asyncio.CancelledError:
                break
            except Exception as e:
                print(f"连接清理失败: {e}")

    async def _cleanup_idle_connections(self):
        """Close pooled connections unused for more than 10 minutes."""
        current_time = datetime.now()
        idle_threshold = timedelta(minutes=10)

        connections_to_close = []

        # Snapshot each deque with list(...) so removal is safe mid-iteration.
        for connection in list(self.read_connections):
            if current_time - connection.last_used > idle_threshold:
                self.read_connections.remove(connection)
                connections_to_close.append(connection)

        for connection in list(self.write_connections):
            if current_time - connection.last_used > idle_threshold:
                self.write_connections.remove(connection)
                connections_to_close.append(connection)

        for connection in connections_to_close:
            await self._close_connection(connection)

        if connections_to_close:
            print(f"清理了 {len(connections_to_close)} 个空闲连接")

    async def close_all(self):
        """Cancel the cleanup task and close every live connection."""
        if self.cleanup_task:
            self.cleanup_task.cancel()
            try:
                await self.cleanup_task
            except asyncio.CancelledError:
                pass

        for connection in list(self.active_connections.values()):
            await self._close_connection(connection)

        self.read_connections.clear()
        self.write_connections.clear()

        print(f"✅ 数据库连接池已关闭")
    def get_stats(self) -> Dict[str, Any]:
        """获取连接池统计"""
        avg_wait_time = 0.0
        if self.stats['connection_requests'] > 0:
            avg_wait_time = self.stats['connection_wait_time_total'] / self.stats['connection_requests']
        
        return {
            'database_type': self.database_type.value,
            'pool_config': {
                'min_connections': self.min_connections,
                'max_connections': self.max_connections
            },
            'current_state': {
                'read_connections_available': len(self.read_connections),
                'write_connections_available': len(self.write_connections),
                'total_active_connections': len(self.active_connections)
            },
            'statistics': {
                **self.stats,
                'average_connection_wait_time': avg_wait_time
            }
        }class 
QueryOptimizer:
    """查询优化器"""
    
    def __init__(self, database_type: DatabaseType):
        self.database_type = database_type
        self.query_stats: Dict[str, QueryStats] = {}
        self.slow_query_threshold = 1.0  # 1秒
        self.query_cache: Dict[str, Any] = {}
        self.cache_ttl = 300  # 5分钟
        
        # 查询模式分析
        self.query_patterns = defaultdict(list)
        self.table_access_patterns = defaultdict(int)
        self.column_access_patterns = defaultdict(int)
    
    def analyze_query(self, query: str) -> Dict[str, Any]:
        """分析查询"""
        query_hash = self._hash_query(query)
        query_type = self._detect_query_type(query)
        
        analysis = {
            'query_hash': query_hash,
            'query_type': query_type,
            'tables_accessed': self._extract_tables(query),
            'columns_accessed': self._extract_columns(query),
            'has_where_clause': 'WHERE' in query.upper(),
            'has_join': any(join in query.upper() for join in ['JOIN', 'INNER JOIN', 'LEFT JOIN', 'RIGHT JOIN']),
            'has_order_by': 'ORDER BY' in query.upper(),
            'has_group_by': 'GROUP BY' in query.upper(),
            'estimated_complexity': self._estimate_complexity(query)
        }
        
        return analysis
    
    def record_query_execution(self, query: str, execution_time: float, 
                             rows_affected: int = 0, error: Optional[str] = None):
        """记录查询执行"""
        query_hash = self._hash_query(query)
        query_type = self._detect_query_type(query)
        
        if query_hash not in self.query_stats:
            self.query_stats[query_hash] = QueryStats(
                query_hash=query_hash,
                query_type=query_type
            )
        
        stats = self.query_stats[query_hash]
        stats.execution_count += 1
        stats.total_execution_time += execution_time
        stats.average_execution_time = stats.total_execution_time / stats.execution_count
        stats.min_execution_time = min(stats.min_execution_time, execution_time)
        stats.max_execution_time = max(stats.max_execution_time, execution_time)
        stats.last_executed = datetime.now()
        stats.rows_affected_total += rows_affected
        
        if error:
            stats.error_count += 1
        
        # 记录访问模式
        tables = self._extract_tables(query)
        for table in tables:
            self.table_access_patterns[table] += 1
        
        columns = self._extract_columns(query)
        for column in columns:
            self.column_access_patterns[column] += 1
    
    def get_slow_queries(self, limit: int = 10) -> List[Dict[str, Any]]:
        """获取慢查询"""
        slow_queries = [
            {
                'query_hash': stats.query_hash,
                'query_type': stats.query_type.value,
                'execution_count': stats.execution_count,
                'average_execution_time': stats.average_execution_time,
                'max_execution_time': stats.max_execution_time,
                'total_time': stats.total_execution_time
            }
            for stats in self.query_stats.values()
            if stats.average_execution_time > self.slow_query_threshold
        ]
        
        # 按平均执行时间排序
        slow_queries.sort(key=lambda x: x['average_execution_time'], reverse=True)
        
        return slow_queries[:limit]
    
    def get_optimization_recommendations(self) -> List[Dict[str, Any]]:
        """获取优化建议"""
        recommendations = []
        
        # 基于慢查询的建议
        slow_queries = self.get_slow_queries(5)
        for query in slow_queries:
            recommendations.append({
                'type': 'slow_query_optimization',
                'priority': 'high',
                'description': f'查询执行时间过长 (平均 {query["average_execution_time"]:.2f}s)',
                'suggestion': '考虑添加索引或重写查询',
                'query_hash': query['query_hash']
            })
        
        # 基于访问模式的建议
        frequent_tables = sorted(self.table_access_patterns.items(), 
                               key=lambda x: x[1], reverse=True)[:5]
        
        for table, access_count in frequent_tables:
            if access_count > 100:  # 高频访问表
                recommendations.append({
                    'type': 'index_recommendation',
                    'priority': 'medium',
                    'description': f'表 {table} 访问频率很高 ({access_count} 次)',
                    'suggestion': '考虑为常用查询条件添加索引',
                    'table_name': table
                })
        
        # 基于错误率的建议
        error_prone_queries = [
            stats for stats in self.query_stats.values()
            if stats.error_count > 0 and stats.error_count / stats.execution_count > 0.1
        ]
        
        for stats in error_prone_queries:
            recommendations.append({
                'type': 'error_reduction',
                'priority': 'high',
                'description': f'查询错误率较高 ({stats.error_count}/{stats.execution_count})',
                'suggestion': '检查查询语法和数据完整性',
                'query_hash': stats.query_hash
            })
        
        return recommendations
    
    def _hash_query(self, query: str) -> str:
        """生成查询哈希"""
        # 标准化查询（移除多余空格，转换为小写）
        normalized = ' '.join(query.lower().split())
        return hashlib.md5(normalized.encode()).hexdigest()[:16]
    
    def _detect_query_type(self, query: str) -> QueryType:
        """检测查询类型"""
        query_upper = query.upper().strip()
        
        if query_upper.startswith('SELECT'):
            return QueryType.SELECT
        elif query_upper.startswith('INSERT'):
            return QueryType.INSERT
        elif query_upper.startswith('UPDATE'):
            return QueryType.UPDATE
        elif query_upper.startswith('DELETE'):
            return QueryType.DELETE
        elif query_upper.startswith('CREATE'):
            return QueryType.CREATE
        elif query_upper.startswith('ALTER'):
            return QueryType.ALTER
        elif query_upper.startswith('DROP'):
            return QueryType.DROP
        else:
            return QueryType.SELECT  # 默认
    
    def _extract_tables(self, query: str) -> List[str]:
        """提取查询中的表名"""
        import re
        
        # 简化的表名提取（实际实现需要更复杂的SQL解析）
        tables = []
        
        # 匹配FROM子句中的表名
        from_pattern = r'FROM\s+(\w+)'
        from_matches = re.findall(from_pattern, query, re.IGNORECASE)
        tables.extend(from_matches)
        
        # 匹配JOIN子句中的表名
        join_pattern = r'JOIN\s+(\w+)'
        join_matches = re.findall(join_pattern, query, re.IGNORECASE)
        tables.extend(join_matches)
        
        # 匹配INSERT INTO中的表名
        insert_pattern = r'INSERT\s+INTO\s+(\w+)'
        insert_matches = re.findall(insert_pattern, query, re.IGNORECASE)
        tables.extend(insert_matches)
        
        # 匹配UPDATE中的表名
        update_pattern = r'UPDATE\s+(\w+)'
        update_matches = re.findall(update_pattern, query, re.IGNORECASE)
        tables.extend(update_matches)
        
        return list(set(tables))  # 去重
    
    def _extract_columns(self, query: str) -> List[str]:
        """提取查询中的列名"""
        import re
        
        columns = []
        
        # 匹配WHERE子句中的列名
        where_pattern = r'WHERE\s+(\w+)'
        where_matches = re.findall(where_pattern, query, re.IGNORECASE)
        columns.extend(where_matches)
        
        # 匹配ORDER BY中的列名
        order_pattern = r'ORDER\s+BY\s+(\w+)'
        order_matches = re.findall(order_pattern, query, re.IGNORECASE)
        columns.extend(order_matches)
        
        return list(set(columns))  # 去重
    
    def _estimate_complexity(self, query: str) -> int:
        """估算查询复杂度"""
        complexity = 1
        query_upper = query.upper()
        
        # 基于查询特征增加复杂度
        if 'JOIN' in query_upper:
            complexity += query_upper.count('JOIN') * 2
        
        if 'SUBQUERY' in query_upper or '(' in query:
            complexity += 3
        
        if 'GROUP BY' in query_upper:
            complexity += 2
        
        if 'ORDER BY' in query_upper:
            complexity += 1
        
        if 'HAVING' in query_upper:
            complexity += 2
        
        return complexity


class IndexOptimizer:
    """Inspects a database's existing indexes and proposes new ones."""

    def __init__(self, database_type: DatabaseType):
        self.database_type = database_type
        self.existing_indexes: Dict[str, List[str]] = {}  # table name -> index names
        self.query_patterns: List[Dict[str, Any]] = []

    async def analyze_indexes(self, connection: DatabaseConnection) -> Dict[str, Any]:
        """Dispatch index analysis to the backend-specific implementation."""
        try:
            dispatch = {
                DatabaseType.POSTGRESQL: self._analyze_postgresql_indexes,
                DatabaseType.MYSQL: self._analyze_mysql_indexes,
                DatabaseType.SQLITE: self._analyze_sqlite_indexes,
            }
            handler = dispatch.get(self.database_type)
            if handler is None:
                return {'error': f'不支持的数据库类型: {self.database_type}'}
            return await handler(connection)

        except Exception as e:
            return {'error': f'索引分析失败: {e}'}

    async def _analyze_postgresql_indexes(self, connection: DatabaseConnection) -> Dict[str, Any]:
        """List public-schema indexes from pg_indexes, grouped by table."""
        query = """
        SELECT 
            schemaname,
            tablename,
            indexname,
            indexdef
        FROM pg_indexes 
        WHERE schemaname = 'public'
        ORDER BY tablename, indexname
        """

        try:
            rows = await connection.connection.fetch(query)

            grouped = defaultdict(list)
            for row in rows:
                grouped[row['tablename']].append({
                    'name': row['indexname'],
                    'definition': row['indexdef'],
                    'columns': self._extract_columns_from_index_def(row['indexdef'])
                })

            return {
                'total_indexes': len(rows),
                'indexes_by_table': dict(grouped),
                'analysis_timestamp': datetime.now().isoformat()
            }

        except Exception as e:
            return {'error': f'PostgreSQL索引分析失败: {e}'}

    async def _analyze_mysql_indexes(self, connection: DatabaseConnection) -> Dict[str, Any]:
        """Placeholder: MySQL index analysis is not implemented yet."""
        return {'message': 'MySQL索引分析待实现'}

    async def _analyze_sqlite_indexes(self, connection: DatabaseConnection) -> Dict[str, Any]:
        """Placeholder: SQLite index analysis is not implemented yet."""
        return {'message': 'SQLite索引分析待实现'}

    def _extract_columns_from_index_def(self, index_def: str) -> List[str]:
        """Pull the column list out of the first parenthesized group.

        Heuristic only: expression indexes and quoted identifiers are not
        handled specially.
        """
        import re

        match = re.search(r'\((.*?)\)', index_def)
        if not match:
            return []
        return [part.strip() for part in match.group(1).split(',')]

    def generate_index_recommendations(self, query_stats: Dict[str, QueryStats]) -> List[IndexRecommendation]:
        """Suggest indexes for slow, frequently-run SELECT statements.

        NOTE(review): QueryStats does not carry the query text, so the target
        table/columns are placeholders until extraction is wired through.
        """
        recommendations = []

        for stats in query_stats.values():
            # Only well-sampled slow queries qualify.
            if stats.average_execution_time <= 1.0 or stats.execution_count <= 10:
                continue
            if stats.query_type != QueryType.SELECT:
                continue

            recommendations.append(IndexRecommendation(
                table_name="unknown_table",  # 需要从查询中提取
                columns=["unknown_column"],   # 需要从查询中提取
                reason=f"优化慢查询 (平均执行时间: {stats.average_execution_time:.2f}s)",
                estimated_benefit=stats.average_execution_time * 0.5,  # assume ~50% gain
                creation_cost=0.1,
                maintenance_cost=0.05
            ))

        return recommendations

class DatabaseOptimizationService:
    """Facade wiring a connection pool, query optimizer and index optimizer
    together for each configured database, plus a small TTL result cache.
    """

    def __init__(self, database_configs: Dict[str, Dict[str, Any]]):
        # Expected entry per database name:
        #   {'type': <DatabaseType value>, 'connection_params': {...},
        #    'min_connections': int (default 5), 'max_connections': int (default 20)}
        self.database_configs = database_configs
        self.connection_pools: Dict[str, "ConnectionPool"] = {}
        self.query_optimizers: Dict[str, "QueryOptimizer"] = {}
        self.index_optimizers: Dict[str, "IndexOptimizer"] = {}

        # Service-wide counters (all databases combined).
        self.service_stats = {
            'total_queries_executed': 0,
            'total_execution_time': 0.0,
            'cache_hits': 0,
            'cache_misses': 0,
            'optimization_recommendations_generated': 0
        }

        # cache_key -> {'result': <query result dict>, 'timestamp': epoch seconds}
        self.query_cache: Dict[str, Dict[str, Any]] = {}
        self.cache_ttl = 300  # seconds

    async def initialize(self):
        """Build a pool and both optimizers for every configured database.

        A failure for one database is logged and does not stop the others.
        """
        for db_name, config in self.database_configs.items():
            try:
                db_type = DatabaseType(config['type'])
                pool = ConnectionPool(
                    database_type=db_type,
                    connection_params=config['connection_params'],
                    min_connections=config.get('min_connections', 5),
                    max_connections=config.get('max_connections', 20)
                )

                await pool.initialize()
                self.connection_pools[db_name] = pool

                self.query_optimizers[db_name] = QueryOptimizer(db_type)
                self.index_optimizers[db_name] = IndexOptimizer(db_type)

                print(f"✅ 数据库 {db_name} 优化服务已初始化")

            except Exception as e:
                print(f"❌ 数据库 {db_name} 初始化失败: {e}")

    async def execute_query(self, db_name: str, query: str, params: Optional[List] = None,
                          use_cache: bool = True, read_only: bool = True) -> Dict[str, Any]:
        """Execute ``query`` against ``db_name``, with optional result caching.

        Returns a dict with ``success``; on success also ``data`` (list of row
        dicts), ``row_count``, ``execution_time`` and ``from_cache``; on
        failure ``error`` and ``execution_time``. Errors are also folded into
        the per-database query statistics.
        """
        start_time = time.time()

        try:
            # Only read-only queries are cached. ``cache_key`` is guaranteed
            # to be defined where it is used below because the write-back
            # condition is identical to this one.
            if use_cache and read_only:
                cache_key = self._generate_cache_key(query, params)
                cached_result = self._get_from_cache(cache_key)
                if cached_result:
                    self.service_stats['cache_hits'] += 1
                    return cached_result
                else:
                    self.service_stats['cache_misses'] += 1

            pool = self.connection_pools.get(db_name)
            if not pool:
                raise ValueError(f"数据库 {db_name} 未配置")

            connection = await pool.get_connection(read_only=read_only)
            if not connection:
                raise Exception("无法获取数据库连接")

            try:
                if connection.database_type == DatabaseType.POSTGRESQL:
                    if params:
                        result = await connection.connection.fetch(query, *params)
                    else:
                        result = await connection.connection.fetch(query)

                    result_data = [dict(row) for row in result]

                elif connection.database_type == DatabaseType.SQLITE:
                    cursor = await connection.connection.execute(query, params or [])
                    rows = await cursor.fetchall()

                    columns = [description[0] for description in cursor.description] if cursor.description else []
                    result_data = [dict(zip(columns, row)) for row in rows]

                else:
                    # NOTE(review): MySQL pools can be configured but are not
                    # executable here — confirm whether that is intended.
                    raise ValueError(f"不支持的数据库类型: {connection.database_type}")

                execution_time = time.time() - start_time

                optimizer = self.query_optimizers[db_name]
                optimizer.record_query_execution(
                    query=query,
                    execution_time=execution_time,
                    rows_affected=len(result_data)
                )

                self.service_stats['total_queries_executed'] += 1
                self.service_stats['total_execution_time'] += execution_time

                query_result = {
                    'success': True,
                    'data': result_data,
                    'row_count': len(result_data),
                    'execution_time': execution_time,
                    'from_cache': False
                }

                if use_cache and read_only:
                    self._set_to_cache(cache_key, query_result)

                return query_result

            finally:
                # Always give the connection back, even on error.
                await pool.return_connection(connection)

        except Exception as e:
            execution_time = time.time() - start_time

            # Record the failure so error-rate recommendations can surface it.
            if db_name in self.query_optimizers:
                self.query_optimizers[db_name].record_query_execution(
                    query=query,
                    execution_time=execution_time,
                    error=str(e)
                )

            return {
                'success': False,
                'error': str(e),
                'execution_time': execution_time
            }

    async def get_optimization_report(self, db_name: str) -> Dict[str, Any]:
        """Assemble a full optimization report for one configured database."""
        if db_name not in self.query_optimizers:
            return {'error': f'数据库 {db_name} 未配置'}

        optimizer = self.query_optimizers[db_name]
        index_optimizer = self.index_optimizers[db_name]
        pool = self.connection_pools[db_name]

        # A live connection is needed only for the index analysis section.
        connection = await pool.get_connection(read_only=True)

        try:
            report = {
                'database_name': db_name,
                'generated_at': datetime.now().isoformat(),
                'query_statistics': {
                    'total_unique_queries': len(optimizer.query_stats),
                    'slow_queries': optimizer.get_slow_queries(),
                    'query_type_distribution': self._get_query_type_distribution(optimizer),
                    # Fix: the original sliced the dict in insertion order, so
                    # "most frequent" was really "first seen"; sort by count.
                    'most_frequent_tables': dict(sorted(
                        optimizer.table_access_patterns.items(),
                        key=lambda item: item[1], reverse=True)[:10])
                },
                'index_analysis': await index_optimizer.analyze_indexes(connection) if connection else {},
                'optimization_recommendations': optimizer.get_optimization_recommendations(),
                'index_recommendations': index_optimizer.generate_index_recommendations(optimizer.query_stats),
                'connection_pool_stats': pool.get_stats(),
                'service_statistics': self.service_stats
            }

            self.service_stats['optimization_recommendations_generated'] += 1

            return report

        finally:
            if connection:
                await pool.return_connection(connection)

    def _generate_cache_key(self, query: str, params: Optional[List]) -> str:
        """Derive a deterministic cache key from query text and parameters.

        NOTE(review): params must be JSON-serializable for this to work —
        verify callers never pass e.g. datetime values.
        """
        cache_data = {
            'query': query.strip(),
            'params': params or []
        }
        # sort_keys keeps the JSON form (and thus the key) stable.
        cache_string = json.dumps(cache_data, sort_keys=True)
        return hashlib.md5(cache_string.encode()).hexdigest()

    def _get_from_cache(self, cache_key: str) -> Optional[Dict[str, Any]]:
        """Return a cached result if present and fresh, else None.

        Expired entries are deleted on access.
        """
        if cache_key in self.query_cache:
            cache_entry = self.query_cache[cache_key]

            if time.time() - cache_entry['timestamp'] < self.cache_ttl:
                # Shallow copy: top-level keys are independent, but the
                # 'data' list still aliases the cached rows.
                result = cache_entry['result'].copy()
                result['from_cache'] = True
                return result
            else:
                del self.query_cache[cache_key]

        return None

    def _set_to_cache(self, cache_key: str, result: Dict[str, Any]):
        """Store ``result``; past 1000 entries, evict the single oldest."""
        self.query_cache[cache_key] = {
            'result': result.copy(),
            'timestamp': time.time()
        }

        if len(self.query_cache) > 1000:
            oldest_key = min(self.query_cache.keys(),
                           key=lambda k: self.query_cache[k]['timestamp'])
            del self.query_cache[oldest_key]

    def _get_query_type_distribution(self, optimizer: "QueryOptimizer") -> Dict[str, int]:
        """Sum execution counts per query type across the optimizer's stats."""
        distribution = defaultdict(int)

        for stats in optimizer.query_stats.values():
            distribution[stats.query_type.value] += stats.execution_count

        return dict(distribution)

    async def cleanup_cache(self):
        """Drop every expired entry from the query cache."""
        current_time = time.time()
        expired_keys = [
            key for key, entry in self.query_cache.items()
            if current_time - entry['timestamp'] > self.cache_ttl
        ]

        for key in expired_keys:
            del self.query_cache[key]

        if expired_keys:
            print(f"清理了 {len(expired_keys)} 个过期缓存项")

    async def shutdown(self):
        """Close every connection pool and clear the result cache."""
        print("🛑 正在关闭数据库优化服务...")

        for db_name, pool in self.connection_pools.items():
            await pool.close_all()
            print(f"✅ 数据库 {db_name} 连接池已关闭")

        self.query_cache.clear()

        print("✅ 数据库优化服务已关闭")