#!/usr/bin/env python3
"""
Performance optimization implementations.

Covers Elasticsearch query tuning (filter context, _source trimming,
precomputed-score sorting), Redis cache optimization, and pagination via
search_after to avoid deep-paging costs.
"""

import asyncio
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import json
import logging

from services.es_client import ESClient
from services.recommendation_cache import RecommendationCache
from config.redis import RedisUtils

# Configure module-level logging (INFO and above) and a shared logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class PerformanceOptimizer:
    """Performance optimizer for the recommendation pipeline.

    Bundles ES query rewriting, search_after pagination, hot-content
    caching, user-behavior tracking, generic query-result caching, batch
    processing, a simplified circuit breaker, and health monitoring.

    Fix over the original: ``implement_batch_processing`` declared the
    required ``process_func`` parameter *after* the defaulted
    ``batch_size`` — a SyntaxError that prevented this module from being
    imported at all. ``process_func`` now defaults to ``None`` (keeping
    the parameter order) and is validated explicitly.
    """

    def __init__(self):
        # Project services: async Elasticsearch client and Redis-backed
        # recommendation cache. Every public method below depends on one
        # or both of these.
        self.es_client = ESClient(use_async=True)
        self.cache_manager = RecommendationCache()

    async def optimize_es_query(self, query: Dict[str, Any]) -> Dict[str, Any]:
        """Rewrite an ES query body for performance.

        Applies three transformations in order: move score-free clauses
        into the filter context, wrap the query with a trimmed _source
        field list, and attach a precomputed-score sort.

        Args:
            query: Raw ES query structure (the value of the "query" key).

        Returns:
            The optimized query structure; on any failure the original
            ``query`` is returned unchanged (best-effort, never raises).
        """
        try:
            # 1. Filter context skips relevance scoring and is cacheable
            #    by ES, unlike the query context.
            optimized_query = self._convert_to_filter_context(query)

            # 2. Restrict _source to the fields consumers actually read.
            optimized_query = self._add_field_cache_config(optimized_query)

            # 3. Sort by precomputed score fields.
            optimized_query = self._optimize_sort_fields(optimized_query)

            logger.info("ES查询优化完成")
            return optimized_query

        except Exception as e:
            # Best-effort: log and fall back to the unoptimized query.
            logger.error(f"ES查询优化失败: {e}")
            return query

    def _convert_to_filter_context(self, query: Dict[str, Any]) -> Dict[str, Any]:
        """Move score-free ``term`` clauses from ``must`` into ``filter``.

        Only ``term`` clauses on the ``status`` field are relocated —
        status checks never need a relevance score. Mutates and returns
        ``query`` in place. Queries without a top-level ``bool`` pass
        through untouched.
        """
        if "bool" in query:
            bool_query = query["bool"]

            if "must" in bool_query:
                filter_clauses = []
                must_clauses = []

                for clause in bool_query["must"]:
                    if "term" in clause and "status" in clause["term"]:
                        # status filtering does not affect ranking.
                        filter_clauses.append(clause)
                    else:
                        must_clauses.append(clause)

                # Only rewrite when something actually moved; note this
                # overwrites any pre-existing "filter" key.
                if filter_clauses:
                    bool_query["filter"] = filter_clauses
                    bool_query["must"] = must_clauses

            query["bool"] = bool_query

        return query

    def _add_field_cache_config(self, query: Dict[str, Any]) -> Dict[str, Any]:
        """Wrap ``query`` with a ``_source`` whitelist of hot fields.

        Returning only the fields the recommendation consumers read
        reduces fetch and transfer cost. Note the return value is a new
        wrapper dict with "query" and "_source" keys, not the input dict.
        """
        cache_config = {
            "query": query,
            "_source": {
                "includes": [
                    "content_id", "title", "description", "author_nickname",
                    "tag_names", "first_image_url", "like_count", "comment_count",
                    "view_count", "published_at", "hot_score", "time_score"
                ]
            }
        }

        return cache_config

    def _optimize_sort_fields(self, query: Dict[str, Any]) -> Dict[str, Any]:
        """Attach a sort by relevance, then precomputed hot score, then recency.

        ``hot_score`` is assumed to be precomputed at index time — TODO
        confirm against the indexing pipeline. Mutates and returns
        ``query`` (overwriting any existing "sort").
        """
        sort_config = [
            {"_score": {"order": "desc"}},
            {"hot_score": {"order": "desc"}},
            {"published_at": {"order": "desc"}}
        ]

        query["sort"] = sort_config
        return query

    async def implement_search_after_pagination(
        self,
        user_tag_ids: List[int],
        tag_weights: Optional[Dict[int, float]] = None,
        search_after: Optional[List] = None,
        size: int = 20
    ) -> Dict[str, Any]:
        """Paginate recommendations with search_after (avoids deep paging).

        Args:
            user_tag_ids: Tag IDs describing the user's interests.
            tag_weights: Optional per-tag weight overrides.
            search_after: Sort-values cursor from the previous page, or
                None for the first page.
            size: Page size.

        Returns:
            The ES client's result dict (expected to contain
            "recommendations", "next_search_after", "total"); an empty
            result structure on error.
        """
        try:
            result = await self.es_client.search_after_recommendation(
                user_tag_ids=user_tag_ids,
                search_after=search_after,
                tag_weights=tag_weights,
                size=size
            )

            logger.info(f"search_after分页查询完成，返回 {len(result['recommendations'])} 条结果")
            return result

        except Exception as e:
            logger.error(f"search_after分页查询失败: {e}")
            return {
                "recommendations": [],
                "next_search_after": None,
                "total": 0
            }

    async def implement_hot_content_cache(self, tag_id: int, size: int = 50) -> List[Dict[str, Any]]:
        """Return hot content for a tag, serving from cache when possible.

        Cache miss path: query ES for the tag, sort by ``hot_score``
        descending, cache the sorted list, then return it.

        Args:
            tag_id: Tag to fetch hot content for.
            size: Maximum number of items to return / fetch.

        Returns:
            List of content dicts (may exceed ``size`` on the miss path,
            matching the original behavior); empty list on error.
        """
        try:
            cached_contents = await self.cache_manager.get_hot_content_by_tag(tag_id)
            if cached_contents:
                logger.info(f"从缓存获取标签 {tag_id} 热门内容")
                return cached_contents[:size]

            recommendations = await self.es_client.tag_based_recommendation(
                user_tag_ids=[tag_id],
                size=size
            )

            # Missing hot_score sorts as 0 (coldest).
            sorted_recommendations = sorted(
                recommendations,
                key=lambda x: x.get("hot_score", 0),
                reverse=True
            )

            await self.cache_manager.cache_hot_content_by_tag(
                tag_id, sorted_recommendations
            )

            logger.info(f"标签 {tag_id} 热门内容缓存更新完成")
            return sorted_recommendations

        except Exception as e:
            logger.error(f"热点内容缓存实现失败: {e}")
            return []

    async def implement_user_behavior_tracking(
        self,
        user_id: int,
        content_id: int,
        behavior_type: str
    ) -> bool:
        """Record a user behavior event and update tag weights from it.

        Args:
            user_id: Acting user.
            content_id: Content the behavior targets.
            behavior_type: Behavior name, e.g. "view"/"like"/"comment"/"share".

        Returns:
            True on success, False if any step raised.
        """
        try:
            await self.cache_manager.cache_user_behavior(
                user_id=user_id,
                content_id=content_id,
                behavior_type=behavior_type
            )

            await self._update_tag_weights_by_behavior(user_id, content_id, behavior_type)

            logger.info(f"用户 {user_id} 行为跟踪完成")
            return True

        except Exception as e:
            logger.error(f"用户行为跟踪失败: {e}")
            return False

    async def _update_tag_weights_by_behavior(
        self,
        user_id: int,
        content_id: int,
        behavior_type: str
    ):
        """Adjust the user's tag weights based on one behavior event.

        NOTE(review): this is an unfinished stub — it computes an
        ``adjustment`` but never applies it, because the content's tag
        list is not yet fetched (would come from ES or MySQL). The cached
        weights are rewritten unchanged. Errors are logged and swallowed.
        """
        try:
            current_weights = await self.cache_manager.get_tag_weights(user_id)

            # Per-behavior weight deltas; unknown behaviors fall back to
            # the "view" weight.
            weight_adjustment = {
                "view": 0.1,
                "like": 0.3,
                "comment": 0.5,
                "share": 0.4
            }

            adjustment = weight_adjustment.get(behavior_type, 0.1)

            # TODO: fetch the content's tag IDs and apply ``adjustment``
            # to each. Until then this is a no-op copy.
            updated_weights = current_weights.copy()

            await self.cache_manager.cache_tag_weights(user_id, updated_weights)

            logger.info(f"用户 {user_id} 标签权重更新完成")

        except Exception as e:
            logger.error(f"标签权重更新失败: {e}")

    async def implement_query_caching(
        self,
        cache_key: str,
        query_func,
        expire: int = 600
    ) -> Any:
        """Cache the JSON-serializable result of ``query_func`` in Redis.

        Args:
            cache_key: Redis key for the cached result.
            query_func: Zero-argument async callable producing the result.
            expire: TTL in seconds.

        Returns:
            The cached (deserialized) or freshly computed result.

        NOTE(review): if the cached path or serialization raises,
        ``query_func`` is executed again in the fallback — it may run
        twice. Results must be JSON-serializable.
        """
        try:
            cached_result = RedisUtils.get(cache_key)
            if cached_result:
                logger.info(f"从缓存获取查询结果: {cache_key}")
                return json.loads(cached_result)

            result = await query_func()

            RedisUtils.set(cache_key, json.dumps(result, ensure_ascii=False), expire)
            logger.info(f"查询结果缓存成功: {cache_key}")

            return result

        except Exception as e:
            logger.error(f"查询缓存实现失败: {e}")
            # Cache layer failed: fall back to executing the query directly.
            return await query_func()

    async def implement_batch_processing(
        self,
        items: List[Any],
        batch_size: int = 100,
        process_func=None
    ) -> List[Any]:
        """Process ``items`` in batches through ``process_func``.

        Fix: the original signature placed the required ``process_func``
        after the defaulted ``batch_size``, which is a SyntaxError in
        Python. It now defaults to None and is validated explicitly,
        preserving the original parameter order.

        Args:
            items: Items to process.
            batch_size: Number of items per batch.
            process_func: Async callable taking a batch (list) and
                returning an iterable of results. Required.

        Returns:
            Concatenated results from all batches; empty list if any
            batch raised.

        Raises:
            ValueError: If ``process_func`` is not provided.
        """
        if process_func is None:
            raise ValueError("process_func is required")

        try:
            results: List[Any] = []

            for start in range(0, len(items), batch_size):
                batch = items[start:start + batch_size]
                batch_result = await process_func(batch)
                results.extend(batch_result)

                # Brief pause between batches to avoid overloading
                # downstream services.
                await asyncio.sleep(0.01)

            logger.info(f"批量处理完成，处理了 {len(items)} 个项目")
            return results

        except Exception as e:
            logger.error(f"批量处理失败: {e}")
            return []

    async def implement_circuit_breaker(
        self,
        operation_func,
        failure_threshold: int = 5,
        timeout: int = 30
    ) -> Any:
        """Run ``operation_func`` with a timeout ("simplified" breaker).

        NOTE(review): this is only the timeout half of a circuit breaker —
        ``failure_threshold`` is currently unused (no failure counting or
        open/half-open state). The unused ``start_time`` local from the
        original was removed.

        Args:
            operation_func: Zero-argument async callable to execute.
            failure_threshold: Reserved for a future failure counter.
            timeout: Seconds before the operation is aborted.

        Returns:
            The operation's result.

        Raises:
            Exception: On timeout, or re-raises whatever the operation raised.
        """
        try:
            try:
                result = await asyncio.wait_for(operation_func(), timeout=timeout)
                logger.info("操作执行成功")
                return result

            except asyncio.TimeoutError:
                logger.warning(f"操作超时: {timeout}秒")
                raise Exception("操作超时")

        except Exception as e:
            logger.error(f"熔断器触发: {e}")
            raise e

    async def monitor_performance(self) -> Dict[str, Any]:
        """Collect a point-in-time health/performance snapshot.

        Checks ES health, Redis reachability (via an ``exists`` probe on
        the "health_check" key — presumably any successful round-trip
        means Redis is up; verify semantics of RedisUtils.exists), and
        cache statistics.

        Returns:
            Metrics dict with "status" of "healthy"/"degraded", or an
            "error" status dict if collection itself failed.
        """
        try:
            es_health = await self.es_client.health_check()

            redis_health = RedisUtils.exists("health_check")

            cache_stats = await self.cache_manager.get_cache_stats()

            # datetime.utcnow() is deprecated in Python 3.12+; kept for
            # output compatibility (naive ISO timestamp, no offset).
            performance_metrics = {
                "timestamp": datetime.utcnow().isoformat(),
                "es_health": es_health,
                "redis_health": redis_health,
                "cache_stats": cache_stats,
                "status": "healthy" if es_health and redis_health else "degraded"
            }

            logger.info("性能监控完成")
            return performance_metrics

        except Exception as e:
            logger.error(f"性能监控失败: {e}")
            return {
                "timestamp": datetime.utcnow().isoformat(),
                "status": "error",
                "error": str(e)
            }

# 性能优化配置
class PerformanceConfig:
    """Central tuning knobs for the performance-optimization layer.

    Grouped by concern: ES query behavior, cache TTLs, pagination
    limits, batch processing, and circuit-breaker thresholds.
    """

    # Elasticsearch query tuning.
    ES_OPTIMIZATION = {
        "use_filter_context": True,   # prefer non-scoring filter clauses
        "enable_field_cache": True,
        "max_result_window": 10000,
        "refresh_interval": "30s",
    }

    # Cache TTLs (seconds) and size cap.
    CACHE_CONFIG = {
        "recommendation_expire": 600,   # 10 minutes
        "hot_content_expire": 3600,     # 1 hour
        "user_behavior_expire": 86400,  # 24 hours
        "max_cache_size": 1000,
    }

    # Pagination limits; search_after preferred over deep offsets.
    PAGINATION_CONFIG = {
        "default_size": 20,
        "max_size": 100,
        "use_search_after": True,
        "max_offset": 10000,
    }

    # Batch-processing parameters.
    BATCH_CONFIG = {
        "batch_size": 100,
        "max_concurrent": 10,
        "delay_between_batches": 0.01,  # seconds of pause per batch
    }

    # Circuit-breaker thresholds (seconds where applicable).
    CIRCUIT_BREAKER_CONFIG = {
        "failure_threshold": 5,
        "timeout": 30,
        "recovery_timeout": 60,
    }

# 性能优化工具函数
class PerformanceUtils:
    """Stateless helpers for sizing and costing ES queries."""

    @staticmethod
    def calculate_query_complexity(query: Dict[str, Any]) -> int:
        """Count the clauses contributing to a query's complexity.

        Sums the lengths of bool must/should/filter lists plus any
        function_score functions. Unknown shapes contribute zero.
        """
        total = 0

        if "bool" in query:
            section = query["bool"]
            total += sum(
                len(section.get(clause, []))
                for clause in ("must", "should", "filter")
            )

        if "function_score" in query:
            total += len(query["function_score"].get("functions", []))

        return total

    @staticmethod
    def estimate_query_cost(query: Dict[str, Any]) -> str:
        """Bucket a query into "low" (<=3), "medium" (<=10), or "high" cost."""
        score = PerformanceUtils.calculate_query_complexity(query)
        if score > 10:
            return "high"
        if score > 3:
            return "medium"
        return "low"

    @staticmethod
    def optimize_query_size(size: int) -> int:
        """Clamp a requested page size to [1, 100]; non-positive becomes 20."""
        if size <= 0:
            return 20
        return min(size, 100)

if __name__ == "__main__":
    async def _run_demo() -> None:
        """Exercise ES query optimization and performance monitoring once."""
        optimizer = PerformanceOptimizer()

        # A bool query whose status term should migrate to filter context.
        sample_query = {
            "bool": {
                "must": [
                    {"term": {"status": 2}},
                    {"terms": {"tag_ids": [1, 3, 5]}}
                ]
            }
        }

        optimized = await optimizer.optimize_es_query(sample_query)
        print(f"查询优化结果: {optimized}")

        report = await optimizer.monitor_performance()
        print(f"性能监控结果: {report}")

    asyncio.run(_run_demo())
