#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
性能优化服务
提供数据库查询优化、缓存策略、连接池管理等功能
"""

import asyncio
import time
import logging
from typing import Dict, List, Any, Optional, Union
from datetime import datetime, timedelta
from functools import wraps, lru_cache
from contextlib import contextmanager
import threading
from concurrent.futures import ThreadPoolExecutor
import psutil
import gc

from sqlalchemy import text, func, and_, or_
from sqlalchemy.orm import Session, joinedload, selectinload
from sqlalchemy.pool import QueuePool
from sqlalchemy.sql import Select
import pandas as pd
import numpy as np

from core.database import get_db, get_db_session
from core.logging_config import get_logger

logger = get_logger("performance_optimization")

class PerformanceOptimizationService:
    """Performance-optimization service.

    Provides in-memory query-result caching, slow-query detection,
    batch/parallel execution helpers, a monitored DB-session context
    manager, and process-level resource statistics.
    """

    def __init__(self):
        # cache_key -> (result, unix timestamp when cached)
        self.query_cache: Dict[str, Any] = {}
        # cache_key -> last observed execution time in seconds
        self.query_stats: Dict[str, float] = {}
        self.cache_hits = 0
        self.cache_misses = 0
        self.slow_query_threshold = 1.0  # slow-query threshold (seconds)
        self.max_cache_size = 1000
        self.cache_ttl = 300  # cache time-to-live (seconds)
        self._executor = ThreadPoolExecutor(max_workers=4)
        self._monitor_thread = None
        self._running = False
        # query_cache is mutated both by callers and by the background
        # monitor thread, so all cache access is guarded by this lock.
        self._cache_lock = threading.Lock()
        # Lets stop_monitoring() wake the monitor immediately instead of
        # waiting out the remainder of its 60-second sleep interval.
        self._stop_event = threading.Event()

    def start_monitoring(self):
        """Start the background performance-monitor thread (idempotent)."""
        if not self._running:
            self._running = True
            self._stop_event.clear()
            self._monitor_thread = threading.Thread(target=self._monitor_performance, daemon=True)
            self._monitor_thread.start()
            logger.info("性能监控已启动")

    def stop_monitoring(self):
        """Stop the background performance-monitor thread."""
        self._running = False
        # Wake the monitor out of its wait so join() can succeed promptly
        # (previously join(timeout=1) always timed out against a 60 s sleep).
        self._stop_event.set()
        if self._monitor_thread:
            self._monitor_thread.join(timeout=1)
        logger.info("性能监控已停止")

    def _monitor_performance(self):
        """Monitor loop: periodically evict stale cache entries and log stats."""
        while self._running:
            try:
                self._cleanup_expired_cache()
                self._log_performance_stats()
                self._stop_event.wait(60)  # run once per minute
            except Exception as e:
                logger.error(f"性能监控出错: {e}")
                self._stop_event.wait(10)

    def _cleanup_expired_cache(self):
        """Remove cache entries older than the configured TTL."""
        now = time.time()
        with self._cache_lock:
            expired_keys = [
                key for key, (_, cached_at) in self.query_cache.items()
                if now - cached_at > self.cache_ttl
            ]
            for key in expired_keys:
                del self.query_cache[key]

        if expired_keys:
            logger.info(f"清理了 {len(expired_keys)} 个过期缓存项")

    def _log_performance_stats(self):
        """Log aggregate query / cache statistics."""
        if self.query_stats:
            # Bug fix: query_stats values are execution times in seconds, so
            # the query count is len(), not sum() (which was total time).
            total_queries = len(self.query_stats)
            avg_time = np.mean(list(self.query_stats.values()))
            lookups = self.cache_hits + self.cache_misses
            cache_hit_rate = self.cache_hits / lookups * 100 if lookups > 0 else 0

            logger.info(f"性能统计 - 总查询: {total_queries}, 平均时间: {avg_time:.3f}s, 缓存命中率: {cache_hit_rate:.1f}%")

    def cached_query(self, ttl: int = None):
        """Decorator that caches a function's result keyed by its arguments.

        Args:
            ttl: Per-decorator cache lifetime in seconds; falls back to
                ``self.cache_ttl`` when None.
        """
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                # NOTE: hash() of str is process-salted, which is fine for a
                # per-process in-memory cache (keys need not survive restarts).
                cache_key = f"{func.__name__}:{hash(str(args) + str(sorted(kwargs.items())))}"

                # Check the cache (entry may legitimately hold a None result,
                # so test the entry tuple, not the cached value).
                with self._cache_lock:
                    entry = self.query_cache.get(cache_key)
                if entry is not None:
                    data, cached_at = entry
                    if time.time() - cached_at < (ttl or self.cache_ttl):
                        self.cache_hits += 1
                        logger.debug(f"缓存命中: {cache_key}")
                        return data

                # Cache miss: execute and time the call.
                self.cache_misses += 1
                start_time = time.time()
                result = func(*args, **kwargs)
                execution_time = time.time() - start_time

                self.query_stats[cache_key] = execution_time

                # Only cache fast results; slow ones are logged instead.
                if execution_time < self.slow_query_threshold:
                    with self._cache_lock:
                        self.query_cache[cache_key] = (result, time.time())
                        if len(self.query_cache) > self.max_cache_size:
                            # Evict the oldest entry (dicts preserve insertion order).
                            oldest_key = next(iter(self.query_cache))
                            del self.query_cache[oldest_key]

                if execution_time > self.slow_query_threshold:
                    logger.warning(f"慢查询检测: {func.__name__} 执行时间 {execution_time:.3f}s")

                return result

            return wrapper
        return decorator

    @contextmanager
    def get_monitored_db_session(self):
        """Yield a DB session; warn when the whole session span is slow."""
        start_time = time.time()
        try:
            with get_db_session() as session:
                yield session
        finally:
            execution_time = time.time() - start_time
            if execution_time > 0.5:  # flag DB work that held a session > 0.5 s
                logger.warning(f"数据库会话操作耗时: {execution_time:.3f}s")

    def optimize_query(self, query: "Select", session: "Session") -> "Select":
        """Inspect a query and log optimization hints (no rewriting is done).

        Returns:
            The query, unchanged.
        """
        if hasattr(query, 'statement'):
            query_text = str(query.statement)
            if 'WHERE' in query_text and 'INDEX' not in query_text:
                logger.debug("查询可能需要索引优化")

        # Bug fix: Select.limit is a builder *method*, so the previous
        # ``query.limit is None`` test was always False and this warning
        # never fired.  The applied limit lives in ``_limit_clause``.
        if getattr(query, '_limit_clause', None) is None:
            logger.warning("查询未设置限制，建议添加LIMIT子句")

        return query

    def execute_optimized_query(self, query: "Select", session: "Session", use_cache: bool = True) -> List[Any]:
        """Execute ``query`` on ``session`` with optional result caching.

        Raises:
            Exception: re-raises whatever the underlying execute raised,
                after logging it.
        """
        cache_key = f"query:{hash(str(query))}"

        if use_cache:
            with self._cache_lock:
                entry = self.query_cache.get(cache_key)
            if entry is not None:
                data, cached_at = entry
                if time.time() - cached_at < self.cache_ttl:
                    self.cache_hits += 1
                    return data

        self.cache_misses += 1
        start_time = time.time()

        try:
            result = session.execute(query)
            # Bug fix: ``returns_scalars`` is not a public SQLAlchemy Result
            # attribute and raised AttributeError; probe defensively and fall
            # back to .all().
            if getattr(result, 'returns_scalars', False):
                data = result.scalars().all()
            else:
                data = result.all()
            execution_time = time.time() - start_time

            # Only cache fast queries, mirroring cached_query().
            if use_cache and execution_time < self.slow_query_threshold:
                with self._cache_lock:
                    self.query_cache[cache_key] = (data, time.time())

            self.query_stats[cache_key] = execution_time

            return data

        except Exception as e:
            logger.error(f"查询执行失败: {e}")
            raise

    def batch_process(self, data: List[Any], batch_size: int = 1000, process_func=None):
        """Process ``data`` in batches of ``batch_size``.

        Args:
            data: Items to process; returned as-is when process_func is None.
            batch_size: Number of items per batch.
            process_func: Optional callable applied to each batch; a list
                result is extended into the output, anything else appended.

        Returns:
            The flat list of processed (or original) items.
        """
        if not data:
            return []

        results = []
        for i in range(0, len(data), batch_size):
            batch = data[i:i + batch_size]
            if process_func:
                batch_result = process_func(batch)
                results.extend(batch_result if isinstance(batch_result, list) else [batch_result])
            else:
                results.extend(batch)

            # Brief pause between batches to ease memory/IO pressure.
            if i + batch_size < len(data):
                time.sleep(0.01)

        return results

    def parallel_execute(self, queries: List["Select"], session: "Session") -> List[List[Any]]:
        """Run several queries concurrently; a failed query yields ``[]``.

        NOTE(review): a single SQLAlchemy Session is not thread-safe; sharing
        one across worker threads assumes the caller serializes access or
        supplies a thread-safe proxy — confirm before relying on this.
        """
        if not queries:
            # Guard: ThreadPoolExecutor(max_workers=0) raises ValueError.
            return []

        with ThreadPoolExecutor(max_workers=min(len(queries), 4)) as executor:
            futures = [
                executor.submit(self.execute_optimized_query, query, session)
                for query in queries
            ]

            # Bug fix: executor.map() re-raises worker exceptions, so the old
            # ``isinstance(result, Exception)`` check never matched and one
            # failing query aborted the whole batch.  Collect per-future so
            # each failure is logged and replaced with [] in order.
            processed_results = []
            for future in futures:
                try:
                    processed_results.append(future.result())
                except Exception as e:
                    logger.error(f"并行查询失败: {e}")
                    processed_results.append([])

        return processed_results

    def get_system_stats(self) -> Dict[str, Any]:
        """Return process/system resource usage plus cache and query stats."""
        process = psutil.Process()
        memory_info = process.memory_info()

        lookups = self.cache_hits + self.cache_misses
        stats = {
            "cpu_percent": process.cpu_percent(),
            "memory_percent": process.memory_percent(),
            "memory_used": memory_info.rss / 1024 / 1024,  # MB
            "memory_available": psutil.virtual_memory().available / 1024 / 1024,  # MB
            "thread_count": process.num_threads(),
            "cache_stats": {
                "cache_size": len(self.query_cache),
                "cache_hits": self.cache_hits,
                "cache_misses": self.cache_misses,
                "hit_rate": self.cache_hits / lookups * 100 if lookups > 0 else 0
            },
            "query_stats": {
                # Bug fix: values are seconds, so count entries with len()
                # rather than summing durations.
                "total_queries": len(self.query_stats),
                "slow_queries": len([t for t in self.query_stats.values() if t > self.slow_query_threshold]),
                "avg_query_time": np.mean(list(self.query_stats.values())) if self.query_stats else 0
            }
        }

        return stats

    def optimize_memory_usage(self):
        """Trim the cache when near capacity and force a GC pass."""
        if len(self.query_cache) > self.max_cache_size * 0.8:
            self._cleanup_expired_cache()

        gc.collect()

        logger.debug("内存优化已完成")

    def health_check(self) -> Dict[str, Any]:
        """Return an overall health verdict plus tuning recommendations."""
        stats = self.get_system_stats()

        health_status = {
            "status": "healthy",
            "timestamp": datetime.now().isoformat(),
            "system_health": {
                "cpu": "healthy" if stats["cpu_percent"] < 80 else "warning",
                "memory": "healthy" if stats["memory_percent"] < 80 else "warning",
                "cache": "healthy" if stats["cache_stats"]["hit_rate"] > 50 else "warning"
            },
            "recommendations": []
        }

        # Build actionable recommendations for each degraded dimension.
        if stats["cpu_percent"] > 80:
            health_status["recommendations"].append("CPU使用率过高，建议优化查询或增加服务器资源")

        if stats["memory_percent"] > 80:
            health_status["recommendations"].append("内存使用率过高，建议清理缓存或优化数据结构")

        if stats["cache_stats"]["hit_rate"] < 50:
            health_status["recommendations"].append("缓存命中率较低，建议调整缓存策略")

        if stats["query_stats"]["slow_queries"] > 0:
            health_status["recommendations"].append(f"发现 {stats['query_stats']['slow_queries']} 个慢查询，建议优化索引")

        if any("warning" in status for status in health_status["system_health"].values()):
            health_status["status"] = "warning"

        return health_status

    def __del__(self):
        """Best-effort cleanup; must never raise during interpreter teardown."""
        try:
            self.stop_monitoring()
            if hasattr(self, '_executor'):
                self._executor.shutdown(wait=False)
        except Exception:
            # Modules (logging, threading) may already be torn down.
            pass

# Module-level singleton shared by all importers; note that background
# monitoring is NOT started here — callers must invoke
# performance_service.start_monitoring() explicitly.
performance_service = PerformanceOptimizationService()

# 性能监控装饰器
def monitor_performance(func_name: str = None):
    """Decorator that logs the wall-clock time of the wrapped callable.

    The wrapper is always a coroutine function (callers ``await`` it, as
    before) and works for both sync and async wrapped functions.

    Args:
        func_name: Optional label for log messages; defaults to the
            wrapped function's ``__name__``.
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            start_time = time.time()
            try:
                result = func(*args, **kwargs)
                # Bug fix: an async func's coroutine was previously returned
                # un-awaited, so its real runtime and failures were invisible
                # to this monitor (and the caller got a bare coroutine).
                if asyncio.iscoroutine(result):
                    result = await result
                execution_time = time.time() - start_time

                # Only report operations slower than 100 ms.
                if execution_time > 0.1:
                    logger.info(f"性能监控 - {func_name or func.__name__}: {execution_time:.3f}s")

                return result
            except Exception as e:
                execution_time = time.time() - start_time
                logger.error(f"性能监控 - {func_name or func.__name__} 失败: {e}, 耗时: {execution_time:.3f}s")
                raise

        return wrapper
    return decorator

# 数据库查询优化装饰器
def optimize_db_query(func):
    """Decorator that times a DB-query callable and warns when it is slow.

    The wrapper is always a coroutine function (callers ``await`` it, as
    before) and works for both sync and async wrapped functions.
    """
    @wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()

        try:
            result = func(*args, **kwargs)
            # Bug fix: an async func's coroutine was previously returned
            # un-awaited, so the query's real duration and failures were
            # never observed here.
            if asyncio.iscoroutine(result):
                result = await result
            execution_time = time.time() - start_time

            # Queries over 0.5 s get a tuning suggestion in the log.
            if execution_time > 0.5:
                logger.warning(f"数据库查询优化建议: {func.__name__} 耗时 {execution_time:.3f}s")

            return result
        except Exception as e:
            logger.error(f"数据库查询失败: {func.__name__} - {e}")
            raise

    return wrapper