"""
数据库性能优化器
基于测试结果优化数据库写入性能和查询效率
"""
import asyncio
import hashlib
import json
import logging
import queue
import sqlite3
import threading
import time
from collections import deque, defaultdict
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple

from sqlalchemy import text, inspect
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool

from core.database import get_db, engine

logger = logging.getLogger(__name__)


@dataclass
class QueryPerformanceMetric:
    """A single query-execution sample used for performance analysis."""
    query_type: str  # e.g. SELECT / INSERT / UPDATE / DELETE
    table_name: str  # table the query touched
    execution_time_ms: float  # wall-clock execution time, milliseconds
    rows_affected: int  # rows read or written
    timestamp: datetime  # when the sample was recorded
    query_hash: str  # caller-supplied hash identifying the query text
    success: bool = True  # False when the query raised
    error_message: Optional[str] = None  # error text when success is False

@dataclass
class DatabaseOptimizationResult:
    """Outcome of one optimization action (batch write, index creation, ...)."""
    optimization_type: str  # e.g. "batch_write", "index_optimization"
    description: str  # human-readable summary (may embed the error on failure)
    performance_improvement: float  # estimated improvement in percent
    actions_taken: List[str]  # descriptions of what was actually done
    before_metrics: Dict[str, Any]  # table metrics before the action
    after_metrics: Dict[str, Any]  # table metrics after the action
    timestamp: datetime = field(default_factory=datetime.now)  # creation time


class DatabasePerformanceOptimizer:
    """数据库性能优化器"""
    
    def __init__(self):
        self.query_metrics: deque = deque(maxlen=10000)  # 保留最近10000个查询指标
        self.table_metrics: Dict[str, deque] = defaultdict(lambda: deque(maxlen=1000))
        
        # 批量写入配置
        self.batch_size = 100
        self.batch_timeout = 5.0  # 5秒超时
        self.write_queue = queue.Queue()
        self.batch_buffer = []
        self.last_batch_time = time.time()
        
        # 连接池优化
        self.connection_pool_size = 20
        self.max_overflow = 30
        
        # 查询缓存
        self.query_cache: Dict[str, Tuple[Any, datetime]] = {}
        self.cache_ttl = 300  # 5分钟缓存
        
        # 性能统计
        self.stats = {
            'total_queries': 0,
            'slow_queries': 0,
            'cached_queries': 0,
            'batch_writes': 0,
            'optimization_count': 0,
            'avg_query_time': 0.0,
            'total_write_time': 0.0,
            'total_read_time': 0.0
        }
        
        # 优化配置
        self.optimization_config = {
            'enable_query_cache': True,
            'enable_batch_writes': True,
            'enable_connection_pooling': True,
            'enable_index_optimization': True,
            'slow_query_threshold_ms': 1000,
            'batch_write_threshold': 50,
            'cache_hit_ratio_target': 0.8,
            'max_connection_age': 3600  # 1小时
        }
        
        # 线程池
        self.executor = ThreadPoolExecutor(max_workers=4)
        
        # 监控任务
        self._monitoring_task = None
        self._batch_writer_task = None
        self._optimization_task = None
        self.is_running = False
    
    async def initialize(self):
        """初始化数据库性能优化器"""
        try:
            # 启动监控任务
            self.is_running = True
            self._monitoring_task = asyncio.create_task(self._monitoring_loop())
            self._batch_writer_task = asyncio.create_task(self._batch_writer_loop())
            self._optimization_task = asyncio.create_task(self._optimization_loop())
            
            # 初始化数据库优化
            await self._initialize_database_optimizations()
            
            logger.info("Database Performance Optimizer initialized")
            
        except Exception as e:
            logger.error(f"Failed to initialize Database Performance Optimizer: {e}")
            raise
    
    async def _initialize_database_optimizations(self):
        """初始化数据库优化设置"""
        try:
            # 设置SQLite优化参数
            optimizations = [
                "PRAGMA journal_mode = WAL",  # 启用WAL模式
                "PRAGMA synchronous = NORMAL",  # 平衡性能和安全性
                "PRAGMA cache_size = -64000",  # 64MB缓存
                "PRAGMA temp_store = MEMORY",  # 临时表存储在内存
                "PRAGMA mmap_size = 268435456",  # 256MB内存映射
                "PRAGMA optimize",  # 优化数据库
            ]
            
            async with get_db() as db:
                for pragma in optimizations:
                    await db.execute(text(pragma))
                await db.commit()
            
            logger.info("Database optimization settings applied")
            
        except Exception as e:
            logger.error(f"Failed to apply database optimizations: {e}")
    
    async def record_query_performance(self, query_type: str, table_name: str, 
                                     execution_time_ms: float, rows_affected: int = 0,
                                     query_hash: str = "", success: bool = True,
                                     error_message: Optional[str] = None):
        """记录查询性能指标"""
        try:
            metric = QueryPerformanceMetric(
                query_type=query_type,
                table_name=table_name,
                execution_time_ms=execution_time_ms,
                rows_affected=rows_affected,
                timestamp=datetime.now(),
                query_hash=query_hash,
                success=success,
                error_message=error_message
            )
            
            self.query_metrics.append(metric)
            self.table_metrics[table_name].append(metric)
            
            # 更新统计
            self.stats['total_queries'] += 1
            if execution_time_ms > self.optimization_config['slow_query_threshold_ms']:
                self.stats['slow_queries'] += 1
            
            # 更新平均查询时间
            self.stats['avg_query_time'] = (
                (self.stats['avg_query_time'] * (self.stats['total_queries'] - 1) + execution_time_ms) /
                self.stats['total_queries']
            )
            
            if query_type.upper() in ['INSERT', 'UPDATE', 'DELETE']:
                self.stats['total_write_time'] += execution_time_ms
            else:
                self.stats['total_read_time'] += execution_time_ms
            
        except Exception as e:
            logger.error(f"Failed to record query performance: {e}")
    
    async def optimize_batch_writes(self, data_batch: List[Dict[str, Any]], 
                                  table_name: str) -> DatabaseOptimizationResult:
        """优化批量写入"""
        try:
            start_time = time.time()
            before_metrics = await self._get_table_metrics(table_name)
            
            # 批量写入优化策略
            actions_taken = []
            
            if len(data_batch) >= self.optimization_config['batch_write_threshold']:
                # 使用事务批量写入
                success_count = await self._execute_batch_write_transaction(data_batch, table_name)
                actions_taken.append(f"批量事务写入 {success_count} 条记录")
                
                # 记录批量写入统计
                self.stats['batch_writes'] += 1
            else:
                # 普通写入
                success_count = await self._execute_individual_writes(data_batch, table_name)
                actions_taken.append(f"单条写入 {success_count} 条记录")
            
            execution_time = (time.time() - start_time) * 1000
            after_metrics = await self._get_table_metrics(table_name)
            
            # 计算性能改善
            performance_improvement = self._calculate_write_performance_improvement(
                before_metrics, after_metrics, execution_time, len(data_batch)
            )
            
            return DatabaseOptimizationResult(
                optimization_type="batch_write",
                description=f"批量写入优化 - {table_name}表",
                performance_improvement=performance_improvement,
                actions_taken=actions_taken,
                before_metrics=before_metrics,
                after_metrics=after_metrics
            )
            
        except Exception as e:
            logger.error(f"Batch write optimization failed: {e}")
            return DatabaseOptimizationResult(
                optimization_type="batch_write",
                description=f"批量写入优化失败: {str(e)}",
                performance_improvement=0.0,
                actions_taken=[],
                before_metrics={},
                after_metrics={}
            )
    
    async def _execute_batch_write_transaction(self, data_batch: List[Dict[str, Any]], 
                                             table_name: str) -> int:
        """执行批量写入事务"""
        try:
            success_count = 0
            
            async with get_db() as db:
                # 开始事务
                await db.begin()
                
                try:
                    for data in data_batch:
                        # 根据表名构建插入语句
                        if table_name == "ai_results":
                            await self._insert_ai_result(db, data)
                        elif table_name == "cameras":
                            await self._insert_camera_data(db, data)
                        else:
                            await self._insert_generic_data(db, table_name, data)
                        
                        success_count += 1
                    
                    # 提交事务
                    await db.commit()
                    
                except Exception as e:
                    # 回滚事务
                    await db.rollback()
                    logger.error(f"Batch transaction failed, rolled back: {e}")
                    raise
            
            return success_count
            
        except Exception as e:
            logger.error(f"Batch write transaction failed: {e}")
            return 0
    
    async def _execute_individual_writes(self, data_batch: List[Dict[str, Any]], 
                                       table_name: str) -> int:
        """执行单条写入"""
        try:
            success_count = 0
            
            for data in data_batch:
                try:
                    async with get_db() as db:
                        if table_name == "ai_results":
                            await self._insert_ai_result(db, data)
                        elif table_name == "cameras":
                            await self._insert_camera_data(db, data)
                        else:
                            await self._insert_generic_data(db, table_name, data)
                        
                        await db.commit()
                        success_count += 1
                        
                except Exception as e:
                    logger.error(f"Individual write failed for {table_name}: {e}")
            
            return success_count
            
        except Exception as e:
            logger.error(f"Individual writes failed: {e}")
            return 0
    
    async def _insert_ai_result(self, db, data: Dict[str, Any]):
        """Insert one AI detection/analysis row into `ai_results`.

        `data` must supply every named bind parameter below; the caller owns
        the surrounding transaction and is responsible for committing.
        """
        query = text("""
            INSERT INTO ai_results (
                camera_id, algorithm_name, result_type, result_data, confidence,
                timestamp, frame_timestamp, processing_latency, frame_id, 
                stream_info, is_real_time, batch_id, worker_id, gpu_id
            ) VALUES (
                :camera_id, :algorithm_name, :result_type, :result_data, :confidence,
                :timestamp, :frame_timestamp, :processing_latency, :frame_id,
                :stream_info, :is_real_time, :batch_id, :worker_id, :gpu_id
            )
        """)
        
        await db.execute(query, data)
    
    async def _insert_camera_data(self, db, data: Dict[str, Any]):
        """Update a camera's runtime stream/processing state.

        NOTE(review): despite the `_insert_` name this issues an UPDATE keyed
        on :camera_id — rows for unknown cameras are silently not created.
        The caller owns the transaction and must commit.
        """
        query = text("""
            UPDATE cameras SET 
                stream_status = :stream_status,
                processing_fps = :processing_fps,
                last_frame_time = :last_frame_time,
                performance_metrics = :performance_metrics,
                reconnect_count = :reconnect_count
            WHERE id = :camera_id
        """)
        
        await db.execute(query, data)
    
    async def _insert_generic_data(self, db, table_name: str, data: Dict[str, Any]):
        """Insert a row into an arbitrary table, deriving columns from `data` keys.

        SECURITY NOTE(review): `table_name` and the keys of `data` are
        interpolated directly into the SQL string (values are still bound as
        parameters). Safe only while both come from trusted internal callers —
        never pass user-controlled identifiers here.
        """
        # Column names and :placeholders come from the dict keys, in dict order.
        columns = list(data.keys())
        placeholders = [f":{col}" for col in columns]
        
        query = text(f"""
            INSERT INTO {table_name} ({', '.join(columns)})
            VALUES ({', '.join(placeholders)})
        """)
        
        await db.execute(query, data)
    
    async def optimize_query_performance(self, query: str, params: Dict[str, Any] = None) -> Tuple[Any, float]:
        """优化查询性能"""
        try:
            start_time = time.time()
            
            # 生成查询缓存键
            cache_key = self._generate_cache_key(query, params)
            
            # 检查缓存
            if self.optimization_config['enable_query_cache']:
                cached_result = self._get_cached_result(cache_key)
                if cached_result is not None:
                    self.stats['cached_queries'] += 1
                    execution_time = (time.time() - start_time) * 1000
                    return cached_result, execution_time
            
            # 执行查询
            async with get_db() as db:
                if params:
                    result = await db.execute(text(query), params)
                else:
                    result = await db.execute(text(query))
                
                # 获取结果
                if query.strip().upper().startswith('SELECT'):
                    query_result = result.fetchall()
                else:
                    query_result = result.rowcount
            
            execution_time = (time.time() - start_time) * 1000
            
            # 缓存结果（仅对SELECT查询）
            if (self.optimization_config['enable_query_cache'] and 
                query.strip().upper().startswith('SELECT') and 
                execution_time < 5000):  # 只缓存快速查询
                self._cache_result(cache_key, query_result)
            
            return query_result, execution_time
            
        except Exception as e:
            execution_time = (time.time() - start_time) * 1000
            logger.error(f"Query optimization failed: {e}")
            raise
    
    def _generate_cache_key(self, query: str, params: Dict[str, Any] = None) -> str:
        """生成缓存键"""
        import hashlib
        
        cache_data = query
        if params:
            cache_data += json.dumps(params, sort_keys=True)
        
        return hashlib.md5(cache_data.encode()).hexdigest()
    
    def _get_cached_result(self, cache_key: str) -> Optional[Any]:
        """获取缓存结果"""
        if cache_key in self.query_cache:
            result, cached_time = self.query_cache[cache_key]
            
            # 检查缓存是否过期
            if (datetime.now() - cached_time).total_seconds() < self.cache_ttl:
                return result
            else:
                # 删除过期缓存
                del self.query_cache[cache_key]
        
        return None
    
    def _cache_result(self, cache_key: str, result: Any):
        """缓存查询结果"""
        # 限制缓存大小
        if len(self.query_cache) > 1000:
            # 删除最旧的缓存项
            oldest_key = min(self.query_cache.keys(), 
                           key=lambda k: self.query_cache[k][1])
            del self.query_cache[oldest_key]
        
        self.query_cache[cache_key] = (result, datetime.now())
    
    async def optimize_database_indexes(self) -> DatabaseOptimizationResult:
        """优化数据库索引"""
        try:
            start_time = time.time()
            actions_taken = []
            
            # 分析查询模式，创建必要的索引
            index_recommendations = await self._analyze_index_requirements()
            
            async with get_db() as db:
                for index_sql in index_recommendations:
                    try:
                        await db.execute(text(index_sql))
                        actions_taken.append(f"创建索引: {index_sql}")
                    except Exception as e:
                        if "already exists" not in str(e):
                            logger.error(f"Failed to create index: {e}")
                
                await db.commit()
            
            execution_time = (time.time() - start_time) * 1000
            
            return DatabaseOptimizationResult(
                optimization_type="index_optimization",
                description="数据库索引优化",
                performance_improvement=len(actions_taken) * 10.0,  # 估算每个索引10%改善
                actions_taken=actions_taken,
                before_metrics={},
                after_metrics={"indexes_created": len(actions_taken)}
            )
            
        except Exception as e:
            logger.error(f"Index optimization failed: {e}")
            return DatabaseOptimizationResult(
                optimization_type="index_optimization",
                description=f"索引优化失败: {str(e)}",
                performance_improvement=0.0,
                actions_taken=[],
                before_metrics={},
                after_metrics={}
            )
    
    async def _analyze_index_requirements(self) -> List[str]:
        """分析索引需求"""
        index_recommendations = []
        
        # 基于查询模式推荐索引
        slow_queries = [m for m in self.query_metrics 
                       if m.execution_time_ms > self.optimization_config['slow_query_threshold_ms']]
        
        # AI结果表的常用索引
        if any(m.table_name == "ai_results" for m in slow_queries):
            index_recommendations.extend([
                "CREATE INDEX IF NOT EXISTS idx_ai_results_camera_timestamp ON ai_results(camera_id, timestamp)",
                "CREATE INDEX IF NOT EXISTS idx_ai_results_algorithm ON ai_results(algorithm_name)",
                "CREATE INDEX IF NOT EXISTS idx_ai_results_frame_timestamp ON ai_results(frame_timestamp)",
                "CREATE INDEX IF NOT EXISTS idx_ai_results_batch_id ON ai_results(batch_id)",
                "CREATE INDEX IF NOT EXISTS idx_ai_results_real_time ON ai_results(is_real_time, timestamp)"
            ])
        
        # 摄像头表的索引
        if any(m.table_name == "cameras" for m in slow_queries):
            index_recommendations.extend([
                "CREATE INDEX IF NOT EXISTS idx_cameras_status ON cameras(status)",
                "CREATE INDEX IF NOT EXISTS idx_cameras_stream_status ON cameras(stream_status)",
                "CREATE INDEX IF NOT EXISTS idx_cameras_priority ON cameras(priority_level)",
                "CREATE INDEX IF NOT EXISTS idx_cameras_last_frame ON cameras(last_frame_time)"
            ])
        
        return index_recommendations
    
    async def _get_table_metrics(self, table_name: str) -> Dict[str, Any]:
        """Return a small metrics snapshot ({row_count, timestamp}) for a table.

        Returns {} on any failure. NOTE(review): `table_name` is interpolated
        into the SQL string — only trusted internal table names should reach
        here.
        """
        try:
            async with get_db() as db:
                # COUNT(*) as a cheap size/health indicator for the table.
                stats_query = text(f"SELECT COUNT(*) as row_count FROM {table_name}")
                result = await db.execute(stats_query)
                row_count = result.scalar()
                
                return {
                    "row_count": row_count,
                    "timestamp": datetime.now().isoformat()
                }
                
        except Exception as e:
            logger.error(f"Failed to get table metrics for {table_name}: {e}")
            return {}
    
    def _calculate_write_performance_improvement(self, before_metrics: Dict[str, Any],
                                               after_metrics: Dict[str, Any],
                                               execution_time_ms: float,
                                               batch_size: int) -> float:
        """计算写入性能改善"""
        try:
            # 基于批量大小和执行时间计算性能改善
            if batch_size >= self.optimization_config['batch_write_threshold']:
                # 批量写入通常比单条写入快50-80%
                estimated_individual_time = batch_size * 50  # 假设单条写入50ms
                improvement = max(0, (estimated_individual_time - execution_time_ms) / estimated_individual_time * 100)
                return min(80, improvement)  # 最大80%改善
            else:
                return 0.0
                
        except Exception as e:
            logger.error(f"Failed to calculate performance improvement: {e}")
            return 0.0
    
    async def _monitoring_loop(self):
        """性能监控循环"""
        while self.is_running:
            try:
                # 每30秒检查性能指标
                await self._check_performance_metrics()
                await asyncio.sleep(30)
                
            except Exception as e:
                logger.error(f"Error in database monitoring loop: {e}")
                await asyncio.sleep(30)
    
    async def _batch_writer_loop(self):
        """批量写入循环"""
        while self.is_running:
            try:
                # 检查是否需要执行批量写入
                current_time = time.time()
                
                if (len(self.batch_buffer) >= self.batch_size or
                    (self.batch_buffer and current_time - self.last_batch_time > self.batch_timeout)):
                    
                    # 执行批量写入
                    if self.batch_buffer:
                        await self._process_batch_buffer()
                        self.last_batch_time = current_time
                
                await asyncio.sleep(1)
                
            except Exception as e:
                logger.error(f"Error in batch writer loop: {e}")
                await asyncio.sleep(1)
    
    async def _optimization_loop(self):
        """优化循环"""
        while self.is_running:
            try:
                # 每5分钟执行一次优化检查
                await self._perform_periodic_optimization()
                await asyncio.sleep(300)
                
            except Exception as e:
                logger.error(f"Error in optimization loop: {e}")
                await asyncio.sleep(300)
    
    async def _check_performance_metrics(self):
        """检查性能指标"""
        try:
            # 检查慢查询比例
            if self.stats['total_queries'] > 100:
                slow_query_ratio = self.stats['slow_queries'] / self.stats['total_queries']
                if slow_query_ratio > 0.1:  # 超过10%的慢查询
                    logger.warning(f"High slow query ratio: {slow_query_ratio:.2%}")
            
            # 检查缓存命中率
            if self.stats['total_queries'] > 50:
                cache_hit_ratio = self.stats['cached_queries'] / self.stats['total_queries']
                if cache_hit_ratio < self.optimization_config['cache_hit_ratio_target']:
                    logger.info(f"Low cache hit ratio: {cache_hit_ratio:.2%}")
            
        except Exception as e:
            logger.error(f"Performance metrics check failed: {e}")
    
    async def _process_batch_buffer(self):
        """处理批量缓冲区"""
        try:
            if self.batch_buffer:
                # 按表名分组
                table_batches = defaultdict(list)
                for item in self.batch_buffer:
                    table_name = item.get('table_name', 'unknown')
                    table_batches[table_name].append(item['data'])
                
                # 执行批量写入
                for table_name, data_batch in table_batches.items():
                    await self.optimize_batch_writes(data_batch, table_name)
                
                # 清空缓冲区
                self.batch_buffer.clear()
                
        except Exception as e:
            logger.error(f"Batch buffer processing failed: {e}")
    
    async def _perform_periodic_optimization(self):
        """执行周期性优化"""
        try:
            # 清理过期缓存
            self._cleanup_expired_cache()
            
            # 检查是否需要创建新索引
            if self.stats['slow_queries'] > 10:
                await self.optimize_database_indexes()
            
            # 更新优化统计
            self.stats['optimization_count'] += 1
            
        except Exception as e:
            logger.error(f"Periodic optimization failed: {e}")
    
    def _cleanup_expired_cache(self):
        """清理过期缓存"""
        try:
            current_time = datetime.now()
            expired_keys = []
            
            for cache_key, (result, cached_time) in self.query_cache.items():
                if (current_time - cached_time).total_seconds() > self.cache_ttl:
                    expired_keys.append(cache_key)
            
            for key in expired_keys:
                del self.query_cache[key]
            
            if expired_keys:
                logger.info(f"Cleaned up {len(expired_keys)} expired cache entries")
                
        except Exception as e:
            logger.error(f"Cache cleanup failed: {e}")
    
    async def get_performance_report(self) -> Dict[str, Any]:
        """获取性能报告"""
        try:
            # 计算各种性能指标
            total_queries = self.stats['total_queries']
            
            report = {
                'summary': {
                    'total_queries': total_queries,
                    'slow_queries': self.stats['slow_queries'],
                    'slow_query_ratio': self.stats['slow_queries'] / max(total_queries, 1),
                    'cached_queries': self.stats['cached_queries'],
                    'cache_hit_ratio': self.stats['cached_queries'] / max(total_queries, 1),
                    'avg_query_time_ms': self.stats['avg_query_time'],
                    'batch_writes': self.stats['batch_writes'],
                    'optimization_count': self.stats['optimization_count']
                },
                'performance_breakdown': {
                    'total_write_time_ms': self.stats['total_write_time'],
                    'total_read_time_ms': self.stats['total_read_time'],
                    'avg_write_time_ms': self.stats['total_write_time'] / max(self.stats['batch_writes'], 1),
                    'cache_size': len(self.query_cache)
                },
                'recent_metrics': [
                    {
                        'query_type': m.query_type,
                        'table_name': m.table_name,
                        'execution_time_ms': m.execution_time_ms,
                        'timestamp': m.timestamp.isoformat(),
                        'success': m.success
                    }
                    for m in list(self.query_metrics)[-20:]  # 最近20个查询
                ],
                'optimization_config': self.optimization_config,
                'generated_at': datetime.now().isoformat()
            }
            
            return report
            
        except Exception as e:
            logger.error(f"Failed to generate performance report: {e}")
            return {'error': str(e)}
    
    async def shutdown(self):
        """关闭数据库性能优化器"""
        try:
            self.is_running = False
            
            # 停止监控任务
            if self._monitoring_task:
                self._monitoring_task.cancel()
            if self._batch_writer_task:
                self._batch_writer_task.cancel()
            if self._optimization_task:
                self._optimization_task.cancel()
            
            # 处理剩余的批量写入
            if self.batch_buffer:
                await self._process_batch_buffer()
            
            # 关闭线程池
            self.executor.shutdown(wait=True)
            
            logger.info("Database Performance Optimizer shutdown completed")
            
        except Exception as e:
            logger.error(f"Error during database optimizer shutdown: {e}")


# Module-level singleton used by the rest of the application.
database_optimizer = DatabasePerformanceOptimizer()