"""
增强的监控数据收集器
提供更详细的Kafka集群监控指标收集和分析
"""

import asyncio
import json
import time
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple, Any
from collections import defaultdict, deque

from app.core.kafka_client import get_kafka_client
from app.core.redis_client import redis_manager
from app.core.redis_keys import RedisKeys
from app.utils.logger import get_logger

logger = get_logger(__name__)

class EnhancedMonitorCollector:
    """Enhanced monitoring data collector for Kafka clusters.

    Collects detailed consumer-group, partition and cluster-health metrics
    and keeps short rolling in-memory histories of rates and lag.
    """

    def __init__(self):
        # Factory for a bounded history: only the 100 most recent samples
        # per key are retained.
        history = lambda: deque(maxlen=100)
        self.consumer_rate_cache = defaultdict(history)  # consumption-rate samples
        self.throughput_cache = defaultdict(history)     # throughput samples
        self.lag_cache = defaultdict(history)            # lag samples
        
    async def collect_consumer_group_metrics(self) -> Dict[str, Any]:
        """Collect detailed metrics for every consumer group in the cluster.

        For each group this gathers state/membership, per-topic and
        per-partition offsets, lag, and estimated consumption/production
        rates, then derives a health score and backpressure level via the
        shared scoring helpers. The snapshot is persisted to Redis.

        Returns:
            A metrics dict with per-group details, cluster-wide totals and
            backpressure indicator lists, or {} if collection fails outright.
        """
        try:
            kafka_client = get_kafka_client()
            consumer_groups = await kafka_client.list_consumer_groups()

            metrics: Dict[str, Any] = {
                'timestamp': datetime.now().isoformat(),
                'consumer_groups': {},
                'total_groups': len(consumer_groups),
                'active_groups': 0,
                'total_lag': 0,
                'backpressure_indicators': {
                    'high_lag_groups': [],
                    'slow_consumers': [],
                    'unhealthy_groups': []
                }
            }

            # Partition metadata is identical for every group/partition of a
            # topic, so fetch it once per topic instead of once per partition.
            partition_metadata_cache: Dict[str, Any] = {}

            for group_id in consumer_groups:
                # Start from a skeleton so every group appears in the result
                # even when the detail calls below fail.
                group_metrics: Dict[str, Any] = {
                    'group_id': group_id,
                    'state': 'Unknown',
                    'members': 0,
                    'topics': {},
                    'total_lag': 0,
                    'is_active': False,
                    'health_score': 100,         # 0-100, recomputed below
                    'backpressure_level': 'low'  # low / medium / high, recomputed below
                }

                try:
                    group_info = await kafka_client.describe_consumer_group(group_id)
                    if group_info:
                        group_metrics['state'] = group_info.get('state', 'Unknown')
                        members = group_info.get('members', [])
                        group_metrics['members'] = len(members) if members else 0
                        group_metrics['is_active'] = group_metrics['members'] > 0
                except Exception as e:
                    logger.debug(f"获取消费者组 {group_id} 详细信息失败: {e}")
                    # Fall through with the skeleton values.

                if group_metrics['is_active']:
                    metrics['active_groups'] += 1

                # Per-topic / per-partition consumption details.
                try:
                    offsets = await kafka_client.get_consumer_group_offsets(group_id)
                    for topic_partition, offset_metadata in offsets.items():
                        topic_name = topic_partition.topic
                        partition_id = topic_partition.partition

                        if topic_name not in group_metrics['topics']:
                            group_metrics['topics'][topic_name] = {
                                'partitions': {},
                                'total_lag': 0,
                                'consumption_rate': 0.0,
                                'production_rate': 0.0,
                                'backpressure_ratio': 0.0  # consumption rate / production rate
                            }
                        topic_metrics = group_metrics['topics'][topic_name]

                        try:
                            if topic_name not in partition_metadata_cache:
                                partition_metadata_cache[topic_name] = \
                                    await kafka_client.get_partition_metadata(topic_name)
                            latest_offsets = partition_metadata_cache[topic_name]

                            if partition_id < len(latest_offsets):
                                latest_offset = latest_offsets[partition_id]['high_water_mark']
                                current_offset = offset_metadata.offset
                                lag = max(0, latest_offset - current_offset)

                                consumption_rate = await self._calculate_consumption_rate(
                                    group_id, topic_name, current_offset)
                                production_rate = await self._calculate_production_rate(
                                    topic_name, partition_id, latest_offset)

                                topic_metrics['consumption_rate'] += consumption_rate
                                topic_metrics['production_rate'] += production_rate

                                # Topic-level backpressure ratio from the aggregated
                                # rates (the old code used only the last partition's
                                # rates, overwriting earlier partitions).
                                if topic_metrics['production_rate'] > 0:
                                    topic_metrics['backpressure_ratio'] = (
                                        topic_metrics['consumption_rate'] /
                                        topic_metrics['production_rate'])

                                topic_metrics['partitions'][partition_id] = {
                                    'current_offset': current_offset,
                                    'latest_offset': latest_offset,
                                    'lag': lag,
                                    'consumption_rate': consumption_rate,
                                    'production_rate': production_rate
                                }

                                topic_metrics['total_lag'] += lag
                                group_metrics['total_lag'] += lag
                                metrics['total_lag'] += lag
                        except Exception as e:
                            logger.debug(f"获取分区 {topic_name}:{partition_id} 元数据失败: {e}")
                except Exception as e:
                    logger.debug(f"获取消费者组 {group_id} 偏移量失败: {e}")

                # BUG FIX: the scoring below used to sit inside the except
                # handler above, so it only ran when offset collection FAILED.
                # Score every group, delegating to the shared helpers instead
                # of the previous duplicated inline thresholds.
                group_metrics['health_score'] = self._calculate_health_score(group_metrics)
                group_metrics['backpressure_level'] = self._calculate_backpressure_level(group_metrics)

                if group_metrics['total_lag'] > 1000:
                    metrics['backpressure_indicators']['high_lag_groups'].append(group_id)

                if group_metrics['backpressure_level'] == 'high':
                    metrics['backpressure_indicators']['slow_consumers'].append(group_id)

                if group_metrics['health_score'] < 70:
                    metrics['backpressure_indicators']['unhealthy_groups'].append(group_id)

                # Always record the group, even with only skeleton data.
                metrics['consumer_groups'][group_id] = group_metrics

            # Persist the snapshot to Redis (best-effort).
            await self._store_consumer_group_metrics(metrics)

            return metrics

        except Exception as e:
            logger.error(f"收集消费者组指标失败: {e}")
            return {}
    
    async def _calculate_production_rate(self, topic_name: str, partition_id: int, current_offset: int) -> float:
        """Estimate the production rate (messages/minute) for one partition.

        Derived from the change in high-water-mark across the stored offset
        history (newest first, as written by lpush).

        Args:
            topic_name: Topic the partition belongs to.
            partition_id: Partition index within the topic.
            current_offset: Fallback high-water-mark used when a history
                record lacks one (yields a zero diff).

        Returns:
            Messages produced per minute, clamped to >= 0.0; 0.0 when there
            is not enough history or on any failure.
        """
        try:
            # Fetch up to the 31 most recent history records (newest first).
            history_key = f"monitor:topic:{topic_name}:partition:{partition_id}:offsets"
            recent_data = await redis_manager.lrange(history_key, 0, 30)

            if len(recent_data) < 2:
                return 0.0

            # Parse records, skipping malformed entries.
            parsed_records = []
            for data in recent_data:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit — narrowed to Exception.
                try:
                    parsed_records.append(json.loads(data) if isinstance(data, str) else data)
                except Exception:
                    continue

            if len(parsed_records) < 2:
                return 0.0

            # lpush stores newest first: index 0 is latest, -1 is oldest.
            latest = parsed_records[0]
            oldest = parsed_records[-1]

            time_diff = (datetime.fromisoformat(latest['timestamp']) -
                         datetime.fromisoformat(oldest['timestamp'])).total_seconds()

            if time_diff <= 0:
                return 0.0

            offset_diff = (latest.get('high_water_mark', current_offset) -
                           oldest.get('high_water_mark', current_offset))
            rate_per_minute = (offset_diff / time_diff) * 60

            # Negative diffs (e.g. after topic recreation) are clamped to 0.
            return max(0.0, rate_per_minute)

        except Exception as e:
            logger.debug(f"计算生产速率失败 {topic_name}:{partition_id}: {e}")
            return 0.0

    def _calculate_health_score(self, group_metrics: Dict[str, Any]) -> int:
        """Score consumer-group health on a 0-100 scale.

        Penalties are accumulated for total lag, an empty member list, a
        degraded group state, and topics whose consumption rate lags behind
        the production rate (low backpressure ratio).
        """
        penalty = 0

        # Lag penalty: only the largest exceeded threshold applies.
        total_lag = group_metrics.get('total_lag', 0)
        if total_lag > 10000:
            penalty += 50
        elif total_lag > 1000:
            penalty += 30
        elif total_lag > 100:
            penalty += 10

        # No live members means nothing is consuming.
        if not group_metrics.get('members', 0):
            penalty += 20

        # Degraded or unknown group state.
        state = group_metrics.get('state', 'Unknown')
        if state in ('Dead', 'Empty'):
            penalty += 30
        elif state == 'Unknown':
            penalty += 10

        # Topics consuming markedly slower than they are produced to.
        for topic_data in group_metrics.get('topics', {}).values():
            ratio = topic_data.get('backpressure_ratio', 1.0)
            if ratio < 0.5:  # consuming below 50% of production speed
                penalty += 20
            elif ratio < 0.8:
                penalty += 10

        return max(0, min(100, 100 - penalty))

    def _calculate_backpressure_level(self, group_metrics: Dict[str, Any]) -> str:
        """Classify backpressure as 'low', 'medium' or 'high'.

        Combines total lag, the precomputed health score, and the worst
        (lowest, capped at 1.0) consumption/production ratio across the
        group's topics.
        """
        total_lag = group_metrics.get('total_lag', 0)
        health_score = group_metrics.get('health_score', 100)

        # Worst ratio across all topics; 1.0 when there are no topics.
        ratios = [
            topic_data.get('backpressure_ratio', 1.0)
            for topic_data in group_metrics.get('topics', {}).values()
        ]
        worst_ratio = min([1.0, *ratios])

        if total_lag > 5000 or health_score < 50 or worst_ratio < 0.3:
            return 'high'
        if total_lag > 1000 or health_score < 80 or worst_ratio < 0.7:
            return 'medium'
        return 'low'

    async def _calculate_consumption_rate(self, group_id: str, topic_name: str, current_offset: Optional[int] = None) -> float:
        """Estimate the consumption rate (messages/minute) for a group/topic.

        Reads the offset history stored in Redis (newest first, via lpush),
        then evaluates 1/5/15-minute look-back windows and returns the best
        observed rate, preferring the 1-minute window whenever it shows
        progress.

        Args:
            group_id: Consumer group id.
            topic_name: Topic whose consumption is measured.
            current_offset: Unused; kept for interface compatibility with
                callers that pass the group's current offset.

        Returns:
            Messages consumed per minute, or 0.0 when history is missing,
            unparsable, or shows no progress.
        """
        try:
            # Fetch up to the 31 most recent history records (newest first).
            history_key = RedisKeys.monitor_consumer_group_offsets(group_id, topic_name)
            recent_data = await redis_manager.lrange(history_key, 0, 30)
            
            if len(recent_data) < 2:
                return 0.0
            
            # Parse records; entries that are malformed or lack a usable
            # 'timestamp' are silently skipped.
            parsed_records = []
            for data in recent_data:
                try:
                    if isinstance(data, str):
                        record = json.loads(data)
                    else:
                        record = data
                    record['parsed_time'] = datetime.fromisoformat(record['timestamp'])
                    parsed_records.append(record)
                except Exception:
                    continue
            
            if len(parsed_records) < 2:
                return 0.0
            
            # Sort newest-first so index 0 is the most recent record.
            parsed_records.sort(key=lambda x: x['parsed_time'], reverse=True)
            
            # Evaluate several look-back windows and keep the highest rate.
            now = datetime.now()
            time_windows = [1, 5, 15]  # minutes
            best_rate = 0.0
            
            for window_minutes in time_windows:
                window_start = now - timedelta(minutes=window_minutes)
                window_records = [r for r in parsed_records if r['parsed_time'] >= window_start]
                
                if len(window_records) < 2:
                    continue
                
                latest_record = window_records[0]
                earliest_record = window_records[-1]
                
                time_diff_seconds = (latest_record['parsed_time'] - earliest_record['parsed_time']).total_seconds()
                if time_diff_seconds <= 0:
                    continue
                
                # Messages consumed across the window.
                # NOTE(review): assumes history records carry a cumulative
                # 'total_offset' field — confirm against whatever writes
                # this history key.
                total_consumed = 0
                if 'total_offset' in latest_record and 'total_offset' in earliest_record:
                    total_consumed = latest_record['total_offset'] - earliest_record['total_offset']
                
                if total_consumed > 0:
                    rate_per_minute = (total_consumed / time_diff_seconds) * 60
                    if rate_per_minute > best_rate:
                        best_rate = rate_per_minute
                    
                    # Prefer the 1-minute window whenever it shows progress.
                    if window_minutes == 1:
                        break
            
            return best_rate
            
        except Exception as e:
            logger.debug(f"计算消费速率失败 {group_id}:{topic_name}: {e}")
            return 0.0
    
    # Throughput monitoring has been removed.

    # Topic-level throughput calculation has been removed.
    
    async def _collect_partition_metrics(self, topic_name: str, partition_id: int) -> Dict[str, Any]:
        """Gather metadata-based metrics for a single partition and persist them.

        Returns:
            The metrics dict for the partition, or {} when the partition
            does not exist or collection fails.
        """
        try:
            client = get_kafka_client()

            # Partition metadata for the whole topic; bail out if the
            # requested partition index is out of range.
            all_partitions = await client.get_partition_metadata(topic_name)
            if partition_id >= len(all_partitions):
                return {}

            info = all_partitions[partition_id]
            replicas = info.get('replicas', [])
            isr = info.get('isr', [])
            high_water_mark = info.get('high_water_mark', 0)
            log_start = info.get('log_start_offset', 0)

            metrics = {
                'partition_id': partition_id,
                'leader': info.get('leader', -1),
                'replicas': replicas,
                'isr': isr,
                'high_water_mark': high_water_mark,
                'log_start_offset': log_start,
                'message_count': high_water_mark - log_start,
                # Under-replicated when the in-sync set is smaller than
                # the full replica set.
                'is_under_replicated': len(isr) < len(replicas),
                'timestamp': datetime.now().isoformat()
            }

            # Push onto the partition's history list, capped at 100 entries.
            partition_key = RedisKeys.monitor_partition_metrics(topic_name, partition_id)
            await redis_manager.lpush(partition_key, json.dumps(metrics))
            await redis_manager.ltrim(partition_key, 0, 99)

            return metrics

        except Exception as e:
            logger.debug(f"收集分区指标失败 {topic_name}:{partition_id}: {e}")
            return {}
    
    async def _store_consumer_group_metrics(self, metrics: Dict[str, Any]):
        """Persist per-(group, topic) lag and rate history to Redis.

        Each (group, topic) pair gets two capped history lists: a lag
        record (including the per-partition breakdown) and a
        consumption-rate record. Failures are logged and swallowed —
        persistence is best-effort.
        """
        try:
            for group_id, group_metrics in metrics['consumer_groups'].items():
                for topic_name, topic_metrics in group_metrics['topics'].items():
                    # Lag history (keeps the 200 most recent snapshots).
                    lag_record = {
                        'timestamp': metrics['timestamp'],
                        'group_id': group_id,
                        'topic_name': topic_name,
                        'total_lag': topic_metrics['total_lag'],
                        'consumption_rate': topic_metrics['consumption_rate'],
                        'partitions': topic_metrics['partitions'],
                    }
                    lag_key = RedisKeys.monitor_consumer_group_lag(group_id, topic_name)
                    await redis_manager.lpush(lag_key, json.dumps(lag_record))
                    await redis_manager.ltrim(lag_key, 0, 199)

                    # Consumption-rate history (also capped at 200 entries).
                    rate_record = {
                        'timestamp': metrics['timestamp'],
                        'group_id': group_id,
                        'topic_name': topic_name,
                        'consumption_rate': topic_metrics['consumption_rate'],
                    }
                    rate_key = RedisKeys.monitor_consumer_rate(group_id, topic_name)
                    await redis_manager.lpush(rate_key, json.dumps(rate_record))
                    await redis_manager.ltrim(rate_key, 0, 199)

        except Exception as e:
            logger.error(f"存储消费者组指标失败: {e}")
    
    # Throughput persistence has been removed.
    
    async def collect_cluster_health_metrics(self) -> Dict[str, Any]:
        """Collect cluster-level health metrics.

        Gathers broker inventory, counts under-replicated partitions across
        all non-internal topics, and derives an overall health rating
        ('healthy' / 'warning' / 'critical'). The snapshot is pushed onto a
        capped Redis history list and also returned.

        Returns:
            The health metrics dict, or {} when cluster metadata is
            unavailable or collection fails.
        """
        try:
            kafka_client = get_kafka_client()
            
            # Basic cluster information.
            cluster_metadata = await kafka_client.get_cluster_metadata()
            if not cluster_metadata:
                return {}
            brokers = cluster_metadata.get('brokers', [])
            
            health_metrics = {
                'timestamp': datetime.now().isoformat(),
                'cluster_id': cluster_metadata.get('cluster_id', 'unknown'),
                'broker_count': len(brokers),
                'brokers': {},
                'overall_health': 'healthy',
                'issues': []
            }

            # Record each broker's identity and role.
            for broker in brokers:
                broker_id = broker.get('nodeId')
                broker_metrics = {
                    'broker_id': broker_id,
                    'host': broker.get('host'),
                    'port': broker.get('port'),
                    'is_controller': broker_id == cluster_metadata.get('controller_id'),
                    'is_available': True,    # simplified check: broker assumed reachable
                    'rack': broker.get('rack')
                }
                health_metrics['brokers'][broker_id] = broker_metrics
            
            # Topic health: flag partitions whose in-sync replica set is
            # smaller than the full replica set (internal '__' topics skipped).
            topics = await kafka_client.list_topics()
            under_replicated_topics = 0
            
            for topic_name in topics:
                if topic_name.startswith('__'):
                    continue
                
                try:
                    topic_metadata = await kafka_client.get_topic_metadata(topic_name)
                    # NOTE(review): partition_info is accessed both as a dict
                    # (.get) and as an object (.partition) below. If entries
                    # are plain dicts, .partition raises AttributeError and
                    # the whole topic is silently skipped by the except —
                    # confirm the return type of get_topic_metadata.
                    for partition_info in topic_metadata.partitions:
                        replicas = partition_info.get('replicas', [])
                        isr = partition_info.get('isr', [])
                        
                        if len(isr) < len(replicas):
                            under_replicated_topics += 1
                            health_metrics['issues'].append(
                                f"Topic {topic_name} 分区 {partition_info.partition} 副本不足"
                            )
                except Exception:
                    continue
            
            # Derive overall health: any under-replication is a warning,
            # more than 10 recorded issues escalates to critical.
            if under_replicated_topics > 0:
                health_metrics['overall_health'] = 'warning'
            
            if len(health_metrics['issues']) > 10:
                health_metrics['overall_health'] = 'critical'
            
            # Despite the variable name, this counts partitions, not topics.
            health_metrics['under_replicated_partitions'] = under_replicated_topics
            
            # Persist the snapshot to a capped (100-entry) Redis history list.
            health_key = RedisKeys.monitor_cluster_health()
            await redis_manager.lpush(health_key, json.dumps(health_metrics))
            await redis_manager.ltrim(health_key, 0, 99)
            
            return health_metrics
            
        except Exception as e:
            logger.error(f"收集集群健康指标失败: {e}")
            return {}


# Module-level singleton shared by importers of this module.
enhanced_monitor = EnhancedMonitorCollector()
