"""
监控面板API
"""
from typing import Dict, List, Any, Optional
from fastapi import APIRouter, HTTPException, Query
from pydantic import BaseModel
import asyncio
from datetime import datetime, timedelta
import time
import json

from app.core.kafka_client import get_kafka_client
from app.core.redis_client import redis_manager
from app.utils.logger import logger
from aiokafka import TopicPartition

router = APIRouter(tags=["监控面板"])

# Unified Redis key management system
from app.core.redis_keys import RedisKeys, REDIS_PERFORMANCE_HISTORY_KEY, REDIS_PERFORMANCE_LATEST_KEY
from app.core.enhanced_monitor import enhanced_monitor

# Historical-data configuration
MAX_HISTORY_POINTS = 10080  # keep at most 10080 data points (7 days * 24 hours * 60 minutes)
HISTORY_TTL = 7 * 24 * 3600  # history expires after 7 days


class ClusterInfo(BaseModel):
    """Kafka cluster information."""
    broker_count: int  # number of brokers in the cluster
    controller_id: int  # broker id of the current controller (0 when unknown)
    cluster_id: str  # cluster identifier reported by Kafka
    brokers: List[Dict[str, Any]]  # per-broker details: id/host/port/rack


class TopicStats(BaseModel):
    """Per-topic statistics."""
    name: str  # topic name
    partition_count: int  # number of partitions
    replication_factor: int  # replicas per partition
    latest_offset: int  # sum of end offsets across all partitions
    earliest_offset: int  # sum of beginning offsets across all partitions


class ConsumerGroupInfo(BaseModel):
    """Consumer group information."""
    group_id: str  # consumer group identifier
    state: str  # group state, e.g. "STABLE"
    protocol: str  # partition-assignment protocol
    protocol_type: str  # protocol type, typically "consumer"
    members: List[Dict[str, Any]]  # member details (simplified; may be empty)
    lag: Optional[int] = None  # total lag when known, otherwise None


class MonitorOverview(BaseModel):
    """Monitoring overview returned by the /overview endpoint."""
    cluster_info: ClusterInfo  # cluster-level metadata
    topic_count: int  # number of user (non-internal) topics
    consumer_group_count: int  # number of consumer groups (possibly inferred)
    total_partitions: int  # total partitions across user topics
    active_connections: int  # approximated by the consumer-group count
    uptime: str  # simplified uptime indicator (constant string)


class TopicMetrics(BaseModel):
    """Per-topic performance metrics."""
    topic: str  # topic name
    messages_per_second: float  # production rate derived from offset history
    bytes_per_second: float  # estimated byte rate (message rate * avg size)
    partition_metrics: List[Dict[str, Any]]  # partition/leader/replicas/isr info


class HistoricalDataPoint(BaseModel):
    """One sampled performance data point (system + Kafka metrics)."""
    timestamp: str  # ISO-8601 timestamp of the sample
    cpu_usage: float  # CPU utilization percentage
    memory_usage: float  # memory utilization percentage
    disk_usage: float  # disk utilization percentage
    network_bytes_in: int  # cumulative bytes received
    network_bytes_out: int  # cumulative bytes sent
    kafka_messages_per_second: float  # aggregate Kafka message rate
    kafka_bytes_per_second: float  # aggregate Kafka byte rate
    kafka_active_connections: int  # approximated active-connection count


class HistoricalData(BaseModel):
    """Historical data response: points over a time range plus summary stats."""
    timeRange: str  # requested range, e.g. "1h", "6h", "24h", "7d"
    dataPoints: List[HistoricalDataPoint]  # chronologically ordered samples
    summary: Dict[str, Any]  # avg/max/min summaries per metric


async def store_performance_data_point(data_point: HistoricalDataPoint):
    """Persist one performance data point to Redis.

    The point is pushed onto the head of the history list (newest first),
    the list is capped at MAX_HISTORY_POINTS entries with a HISTORY_TTL
    expiry, and the same payload is stored under a dedicated "latest" key
    for fast access. Errors are logged, never raised.
    """
    try:
        payload = data_point.model_dump()

        # Newest entries live at the head of the list.
        await redis_manager.lpush(REDIS_PERFORMANCE_HISTORY_KEY, payload)
        # Cap history length, then refresh the expiry window.
        await redis_manager.ltrim(REDIS_PERFORMANCE_HISTORY_KEY, 0, MAX_HISTORY_POINTS - 1)
        await redis_manager.expire(REDIS_PERFORMANCE_HISTORY_KEY, HISTORY_TTL)

        # Mirror the most recent point under its own key for quick reads.
        await redis_manager.set_json(REDIS_PERFORMANCE_LATEST_KEY, payload, ttl=HISTORY_TTL)

        logger.debug(f"性能数据点已存储到Redis: {data_point.timestamp}")

    except Exception as e:
        logger.error(f"存储性能数据点到Redis失败: {e}")


async def get_performance_history_from_redis(
    time_range: str = "1h",
    max_points: int = 100
) -> List[HistoricalDataPoint]:
    """Load historical performance points from Redis.

    Filters the stored history to the requested time range ("1h", "6h",
    "24h" or "7d"; anything else falls back to 1 hour), sorts it oldest
    first, and downsamples when more than *max_points* points remain.
    Returns an empty list on any error.
    """
    try:
        # Resolve the lookback window; unknown ranges default to one hour.
        window = {
            "1h": timedelta(hours=1),
            "6h": timedelta(hours=6),
            "24h": timedelta(hours=24),
            "7d": timedelta(days=7),
        }.get(time_range, timedelta(hours=1))
        end_time = datetime.now()
        start_time = end_time - window

        # Pull the full stored history.
        raw_data = await redis_manager.lrange(REDIS_PERFORMANCE_HISTORY_KEY, 0, -1)
        if not raw_data:
            return []

        # Keep only the points that fall inside [start_time, end_time].
        points: List[HistoricalDataPoint] = []
        for item in raw_data:
            try:
                candidate = HistoricalDataPoint(**item)
                # Timestamps may carry a trailing 'Z'; strip it for fromisoformat.
                when = datetime.fromisoformat(candidate.timestamp.replace('Z', ''))
            except Exception as e:
                logger.warning(f"解析历史数据点失败: {e}")
                continue
            if start_time <= when <= end_time:
                points.append(candidate)

        # Oldest first for charting.
        points.sort(key=lambda p: p.timestamp)

        # Downsample by striding when there are too many points.
        if len(points) > max_points:
            points = points[::len(points) // max_points]

        logger.debug(f"从Redis获取了 {len(points)} 个历史数据点")
        return points

    except Exception as e:
        logger.error(f"从Redis获取历史数据失败: {e}")
        return []


async def get_latest_performance_data() -> Optional[HistoricalDataPoint]:
    """Return the most recently stored performance point, or None.

    Reads the dedicated "latest" key written by store_performance_data_point;
    returns None when the key is absent or on any error.
    """
    try:
        raw = await redis_manager.get_json(REDIS_PERFORMANCE_LATEST_KEY)
        return HistoricalDataPoint(**raw) if raw else None
    except Exception as e:
        logger.error(f"从Redis获取最新性能数据失败: {e}")
        return None


async def _calculate_message_rate() -> float:
    """Aggregate per-topic message rates into a global messages/second figure.

    Sums each user topic's per-minute rate (internal '__'-prefixed topics
    are skipped) and converts the total to per-second. Returns 0.0 on error.
    """
    try:
        kafka_client = get_kafka_client()
        topics = await kafka_client.list_topics()

        total_rate_per_minute = 0.0
        for name in topics:
            # Internal topics (double-underscore prefix) are excluded.
            if name.startswith('__'):
                continue
            total_rate_per_minute += await _calculate_topic_message_rate(name)

        # Convert msg/min to msg/s.
        total_rate_per_second = total_rate_per_minute / 60.0

        logger.info(f"总消息速率: {total_rate_per_minute:.2f} msg/min = {total_rate_per_second:.2f} msg/s")
        return total_rate_per_second

    except Exception as e:
        logger.warning(f"计算消息速率失败: {e}")
        return 0.0


async def _calculate_bytes_rate() -> float:
    """Aggregate estimated byte rates across all user topics (bytes/s).

    Internal '__'-prefixed topics are skipped; a failure on one topic is
    logged at debug level and does not affect the others. Returns 0.0 when
    there are no topics or on error.
    """
    try:
        kafka_client = get_kafka_client()
        topics = await kafka_client.list_topics()
        if not topics:
            return 0.0

        total_rate = 0.0
        for name in topics:
            # Skip internal topics.
            if name.startswith('__'):
                continue
            try:
                total_rate += await _calculate_topic_bytes_rate(name)
            except Exception as topic_error:
                logger.debug(f"计算Topic {name} 字节速率失败: {topic_error}")

        logger.debug(f"全局字节速率: {total_rate:.2f} bytes/s")
        return total_rate

    except Exception as e:
        logger.warning(f"计算全局字节速率失败: {e}")
        return 0.0


async def _calculate_topic_message_rate(topic_name: str) -> float:
    """
    Compute the message production rate (msg/min) for one topic.

    Modeled after Kafka Manager's approach:
    1. sliding-window calculation,
    2. sampling across multiple time windows,
    3. falling back to a (down-weighted) historical trend rather than
       showing only instantaneous change.

    Reads the per-topic offset snapshots cached in Redis by the background
    collector. Returns 0.0 when there is not enough history, when no offset
    movement is observed, or on any error.
    """
    try:
        # Per-topic offset history previously cached in Redis.
        topic_history_key = RedisKeys.monitor_topic_offsets(topic_name)

        # Fetch up to the 50 most recent records for better sampling.
        recent_offsets = await redis_manager.lrange(topic_history_key, 0, 50)

        if len(recent_offsets) < 2:
            logger.debug(f"Topic {topic_name} 历史数据不足，无法计算速率 (只有 {len(recent_offsets)} 个记录)")
            return 0.0

        # Parse the offset records; they may be JSON strings or dicts.
        parsed_records = []
        for data in recent_offsets:
            try:
                if isinstance(data, str):
                    # Undo escaped quotes before JSON parsing.
                    data = data.replace('\\"', '"')
                    record = json.loads(data)
                else:
                    record = data

                record['parsed_time'] = datetime.fromisoformat(record['timestamp'])
                parsed_records.append(record)

            except (json.JSONDecodeError, TypeError) as parse_error:
                logger.debug(f"跳过无效记录: {parse_error}")
                continue

        if len(parsed_records) < 2:
            logger.debug(f"Topic {topic_name} 有效记录不足")
            return 0.0

        # Sort newest first.
        parsed_records.sort(key=lambda x: x['parsed_time'], reverse=True)

        # Try several window sizes and keep the most suitable rate.
        now = datetime.now()

        # Candidate windows: 1, 5 and 15 minutes.
        time_windows = [1, 5, 15]
        best_rate = 0.0

        for window_minutes in time_windows:
            window_start = now - timedelta(minutes=window_minutes)

            # Records that fall inside this window.
            window_records = [
                r for r in parsed_records
                if r['parsed_time'] >= window_start
            ]

            if len(window_records) < 2:
                continue

            # Newest and oldest record inside the window.
            latest_in_window = window_records[0]
            earliest_in_window = window_records[-1]

            # Elapsed time and offset delta across the window.
            time_diff_seconds = (latest_in_window['parsed_time'] - earliest_in_window['parsed_time']).total_seconds()

            if time_diff_seconds <= 0:
                continue

            offset_diff = latest_in_window['latest_offset'] - earliest_in_window['latest_offset']

            if offset_diff > 0:
                # Per-minute rate over this window.
                rate_per_minute = (offset_diff / time_diff_seconds) * 60

                # Keep the highest observed rate (reflects recent activity).
                if rate_per_minute > best_rate:
                    best_rate = rate_per_minute
                    logger.debug(f"Topic {topic_name} {window_minutes}分钟窗口速率: {rate_per_minute:.2f} msg/min")

                # Prefer the 1-minute window when it has data (most real-time).
                if window_minutes == 1:
                    break

        # No movement in any window: look for a longer-term trend instead.
        if best_rate == 0.0:
            # Any offset change within the last 30 minutes?
            thirty_minutes_ago = now - timedelta(minutes=30)
            extended_records = [
                r for r in parsed_records
                if r['parsed_time'] >= thirty_minutes_ago
            ]

            if len(extended_records) >= 2:
                latest_record = extended_records[0]

                # Find the first older record whose offset differs.
                for record in extended_records[1:]:
                    if record['latest_offset'] != latest_record['latest_offset']:
                        time_diff_seconds = (latest_record['parsed_time'] - record['parsed_time']).total_seconds()
                        offset_diff = latest_record['latest_offset'] - record['latest_offset']

                        if time_diff_seconds > 0 and offset_diff > 0:
                            # Historical average rate, deliberately down-weighted.
                            historical_rate = (offset_diff / time_diff_seconds) * 60 * 0.3  # down-weight to 30%
                            best_rate = historical_rate
                            logger.debug(f"Topic {topic_name} 历史趋势速率: {historical_rate:.2f} msg/min")
                        break

        if best_rate > 0:
            logger.info(f"Topic {topic_name} 计算速率: {best_rate:.2f} msg/min")
            return best_rate

        logger.debug(f"Topic {topic_name} 无消息活动，速率为0")
        return 0.0

    except Exception as e:
        logger.warning(f"计算Topic {topic_name} 消息速率失败: {e}")
        return 0.0


async def _get_topic_offsets(kafka_client, topic: str, partition_count: int) -> tuple[int, int]:
    """Return (total_latest_offset, total_earliest_offset) for *topic*,
    summed over `partition_count` partitions, or (0, 0) on failure.

    Spins up a short-lived AIOKafkaConsumer solely to query offsets. The
    consumer group id is derived from a hash of the topic name so repeated
    calls reuse one group per topic instead of creating a new group each time.
    """
    try:
        # Approach 1: use a lightweight consumer to read the offsets.
        from aiokafka import AIOKafkaConsumer

        # Connection settings come from the shared Kafka client.
        config = await kafka_client._get_kafka_config()

        # Stable per-topic group id; md5 is used only as a cheap hash here,
        # not for anything security-sensitive.
        import hashlib
        topic_hash = hashlib.md5(topic.encode()).hexdigest()[:8]
        group_id = f"kmsg-ui-offset-check-{topic_hash}"

        # Minimal consumer: no auto-commit, 5-second timeout.
        consumer = AIOKafkaConsumer(
            bootstrap_servers=config['bootstrap_servers'],
            api_version=config.get('api_version', 'auto'),
            group_id=group_id,
            enable_auto_commit=False,
            auto_offset_reset='earliest',
            consumer_timeout_ms=5000  # 5-second timeout
        )

        try:
            await consumer.start()

            # One TopicPartition per known partition index.
            partitions = [TopicPartition(topic, p) for p in range(partition_count)]

            # End (latest) and beginning (earliest) offsets per partition.
            latest_offsets = await consumer.end_offsets(partitions)
            earliest_offsets = await consumer.beginning_offsets(partitions)

            # Aggregate across partitions.
            total_latest = sum(latest_offsets.values()) if latest_offsets else 0
            total_earliest = sum(earliest_offsets.values()) if earliest_offsets else 0

            logger.debug(f"Topic {topic} 偏移量获取成功: latest={total_latest}, earliest={total_earliest}")
            return total_latest, total_earliest

        except Exception as consumer_error:
            logger.warning(f"消费者获取Topic {topic} 偏移量失败: {consumer_error}")
            # Fall back to default values when the lookup fails.
            logger.warning(f"Topic {topic} 偏移量获取失败，返回默认值")
            return 0, 0

        finally:
            # Always close the consumer, even on failure.
            try:
                await consumer.stop()
                logger.debug(f"消费者组 {group_id} 已关闭")
            except Exception as close_error:
                logger.warning(f"关闭消费者组 {group_id} 时出错: {close_error}")

    except Exception as e:
        logger.error(f"获取Topic {topic} 偏移量时发生错误: {e}")
        return 0, 0


async def _get_cached_topic_offsets(topic: str) -> tuple[int, int] | None:
    """Return the most recently cached (latest_offset, earliest_offset)
    pair for *topic*, or None when nothing usable is cached.

    Reads the head of the per-topic offset history list written by
    _store_topic_offset_history(). Records may come back from Redis either
    as dicts or as JSON strings; both are handled, mirroring the parsing in
    _calculate_topic_message_rate(). (Previously a string record raised a
    TypeError that was swallowed, so the cache silently never hit.)
    """
    try:
        topic_history_key = RedisKeys.monitor_topic_offsets(topic)
        recent_record = await redis_manager.lindex(topic_history_key, 0)

        if recent_record:
            # Normalize JSON-string records, undoing escaped quotes first.
            if isinstance(recent_record, str):
                recent_record = json.loads(recent_record.replace('\\"', '"'))
            return recent_record['latest_offset'], recent_record['earliest_offset']

        return None

    except Exception as e:
        logger.warning(f"获取Topic {topic} 缓存偏移量失败: {e}")
        return None


async def _store_topic_offset_history(topic_name: str, latest_offset: int, earliest_offset: int):
    """Push one offset snapshot for *topic_name* onto its Redis history list.

    The record carries a timestamp, both offsets, and a non-negative message
    count derived from the offset span. The list keeps the 50 newest records
    (newest first) and expires after 24 hours. Errors are logged, not raised.
    """
    try:
        history_key = RedisKeys.monitor_topic_offsets(topic_name)

        snapshot = {
            "timestamp": datetime.now().isoformat(),
            "latest_offset": latest_offset,
            "earliest_offset": earliest_offset,
            # Clamp to zero so an inverted offset pair never yields a negative count.
            "message_count": latest_offset - earliest_offset if latest_offset > earliest_offset else 0
        }

        # Newest record first; keep only the 50 most recent snapshots.
        await redis_manager.lpush(history_key, snapshot)
        await redis_manager.ltrim(history_key, 0, 49)
        # Snapshots expire after 24 hours.
        await redis_manager.expire(history_key, 24 * 3600)

        logger.debug(f"Topic {topic_name} 偏移量历史已存储: latest={latest_offset}, earliest={earliest_offset}")

    except Exception as e:
        logger.error(f"存储Topic {topic_name} 偏移量历史失败: {e}")


async def _calculate_topic_consumption_rate(topic_name: str, time_window_minutes: int = 5) -> float:
    """Compute the aggregate consumption rate (msg/s) for *topic_name*.

    For each consumer group, takes the two most recent cached offset
    snapshots and derives a rate from the advance of ``committed_offset``;
    the per-group rates are summed. ``time_window_minutes`` is kept for
    interface compatibility but is not currently used. Returns 0.0 when no
    group shows consumption or on any error.
    """
    try:
        kafka_client = get_kafka_client()

        # All consumer groups known to the cluster.
        consumer_groups = await kafka_client.list_consumer_groups()

        if not consumer_groups:
            logger.debug(f"Topic {topic_name} 没有找到消费者组")
            return 0.0

        total_consumption_rate = 0.0
        active_groups = 0

        for group_id in consumer_groups:
            try:
                # Cached offset history for this (group, topic) pair.
                group_history_key = RedisKeys.monitor_consumer_group_offsets(group_id, topic_name)
                recent_offsets = await redis_manager.lrange(group_history_key, 0, 10)

                if len(recent_offsets) < 2:
                    continue

                # Parse records; they may be JSON strings or dicts.
                parsed_records = []
                for data in recent_offsets:
                    try:
                        if isinstance(data, str):
                            data = data.replace('\\"', '"')
                            record = json.loads(data)
                        else:
                            record = data

                        record['parsed_time'] = datetime.fromisoformat(record['timestamp'])
                        parsed_records.append(record)
                    # Narrowed from a bare `except:` so unrelated errors (incl.
                    # KeyboardInterrupt/CancelledError) are no longer swallowed.
                    except (json.JSONDecodeError, TypeError, KeyError, ValueError):
                        continue

                if len(parsed_records) < 2:
                    continue

                # Newest snapshot first.
                parsed_records.sort(key=lambda x: x['parsed_time'], reverse=True)

                latest_record = parsed_records[0]
                previous_record = parsed_records[1]

                time_diff = (latest_record['parsed_time'] - previous_record['parsed_time']).total_seconds()
                if time_diff <= 0:
                    continue

                # Consumption is measured by the advance of committed_offset.
                latest_committed = latest_record.get('committed_offset', 0)
                previous_committed = previous_record.get('committed_offset', 0)
                consumed_diff = latest_committed - previous_committed

                if consumed_diff > 0:
                    group_rate = consumed_diff / time_diff
                    total_consumption_rate += group_rate
                    active_groups += 1
                    logger.debug(f"消费者组 {group_id} 对Topic {topic_name} 的消费速率: {group_rate:.2f} msg/s")

            except Exception as group_error:
                logger.debug(f"计算消费者组 {group_id} 速率失败: {group_error}")
                continue

        if active_groups > 0:
            logger.debug(f"Topic {topic_name} 总消费速率: {total_consumption_rate:.2f} msg/s (来自 {active_groups} 个活跃消费者组)")
            return total_consumption_rate
        else:
            logger.debug(f"Topic {topic_name} 没有活跃的消费者组")
            return 0.0

    except Exception as e:
        logger.warning(f"计算Topic {topic_name} 消费速率失败: {e}")
        return 0.0


async def _calculate_topic_bytes_rate(topic_name: str) -> float:
    """Estimate the byte rate (bytes/s) for one topic from its message rate.

    Uses a rough 1 KB-per-message heuristic; real byte statistics would
    have to come from Kafka JMX metrics. Returns 0.0 when the topic shows
    no message activity or on error.
    """
    try:
        message_rate = await _calculate_topic_message_rate(topic_name)

        # No messages means no bytes.
        if message_rate <= 0:
            return 0.0

        # Empirical average message size: 1 KB.
        estimated_avg_message_size = 1024
        bytes_rate = message_rate * estimated_avg_message_size

        logger.debug(f"Topic {topic_name} 字节速率估算: {bytes_rate:.2f} bytes/s (基于消息速率 {message_rate:.2f} msg/s)")
        return bytes_rate

    except Exception as e:
        logger.warning(f"计算Topic {topic_name} 字节速率失败: {e}")
        return 0.0


@router.get("/overview", response_model=MonitorOverview)
async def get_monitor_overview():
    """
    获取监控概览信息
    """
    try:
        kafka_client = get_kafka_client()
        
        # 获取集群信息
        admin = await kafka_client.get_admin_client()

        # 获取真实的broker数量
        try:
            # 获取真实的broker数量
            broker_count = await kafka_client.get_broker_count()
            logger.info(f"监控概览 - get_broker_count() 返回: {broker_count}")

            # 尝试获取完整的集群元数据
            cluster_metadata = await kafka_client.get_cluster_metadata()
            logger.info(f"监控概览 - get_cluster_metadata() 返回: {cluster_metadata}")

            if cluster_metadata:
                brokers_from_metadata = cluster_metadata.get('brokers', [])
                logger.info(f"监控概览 - 元数据中的brokers数量: {len(brokers_from_metadata)}")

                cluster_info = ClusterInfo(
                    broker_count=len(brokers_from_metadata),
                    controller_id=cluster_metadata.get('controller_id', 0),
                    cluster_id=cluster_metadata.get('cluster_id', 'kafka-cluster'),
                    brokers=brokers_from_metadata
                )
            else:
                # 如果无法获取完整元数据，至少使用真实的broker数量
                logger.info(f"监控概览 - 使用fallback，broker_count: {broker_count}")
                cluster_info = ClusterInfo(
                    broker_count=broker_count,
                    controller_id=0,
                    cluster_id="kafka-cluster",
                    brokers=[
                        {
                            "id": i,
                            "host": "unknown",
                            "port": 9092,
                            "rack": None
                        } for i in range(broker_count)
                    ]
                )
        except Exception as cluster_error:
            logger.warning(f"获取集群元数据失败，使用默认值: {cluster_error}")
            cluster_info = ClusterInfo(
                broker_count=1,
                controller_id=0,
                cluster_id="unknown",
                brokers=[{"id": 0, "host": "localhost", "port": 9092, "rack": None}]
            )
        
        # 获取Topic列表
        topics = await kafka_client.list_topics()
        user_topics = [topic for topic in topics if not topic.startswith('__')]
        
        # 计算总分区数 - 简化版本，与Topic统计API保持一致
        total_partitions = 0
        for topic in user_topics:
            try:
                topic_metadata = await admin.describe_topics([topic])
                if topic in topic_metadata:
                    total_partitions += len(topic_metadata[topic].partitions)
                else:
                    # 如果无法获取元数据，默认1个分区
                    total_partitions += 1
            except Exception as partition_error:
                logger.warning(f"获取Topic {topic} 分区信息失败: {partition_error}")
                # 默认假设每个Topic有1个分区
                total_partitions += 1

        logger.info(f"计算得到总分区数: {total_partitions} (来自 {len(user_topics)} 个Topic)")
        
        # 获取消费者组数量 - 与消费者组API保持一致
        try:
            consumer_groups = await admin.list_consumer_groups()
            consumer_group_count = len(consumer_groups) if consumer_groups else 0

            # 如果获取失败但有Topic，推断至少有1个消费者组
            if consumer_group_count == 0 and len(user_topics) > 0:
                consumer_group_count = 1

            logger.info(f"获取到消费者组数量: {consumer_group_count}")
        except Exception as consumer_error:
            logger.warning(f"获取消费者组列表失败: {consumer_error}")
            # 如果有Topic，推断至少有1个消费者组
            consumer_group_count = 1 if len(user_topics) > 0 else 0
        
        # 计算运行时间（简化版本）
        uptime = "运行中"

        # 获取真实的活跃连接数（这里可以是消费者组数量或其他合适的指标）
        active_connections = consumer_group_count

        return MonitorOverview(
            cluster_info=cluster_info,
            topic_count=len(user_topics),
            consumer_group_count=consumer_group_count,
            total_partitions=total_partitions,
            active_connections=active_connections,
            uptime=uptime
        )
        
    except Exception as e:
        logger.error(f"获取监控概览失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取监控概览失败: {str(e)}")


@router.get("/topics", response_model=List[TopicStats])
async def get_topic_statistics():
    """
    获取所有Topic的统计信息
    """
    try:
        kafka_client = get_kafka_client()
        admin = await kafka_client.get_admin_client()
        
        # 获取Topic列表
        topics = await kafka_client.list_topics()
        user_topics = [topic for topic in topics if not topic.startswith('__')]
        
        topic_stats = []
        
        for topic in user_topics:
            try:
                # 简化的Topic统计 - 先获取基本信息
                partition_count = 1  # 默认1个分区
                replication_factor = 1  # 默认1个副本
                latest_offset = 0
                earliest_offset = 0

                try:
                    # 尝试获取Topic元数据
                    topic_metadata = await admin.describe_topics([topic])
                    if topic in topic_metadata:
                        topic_info = topic_metadata[topic]
                        partition_count = len(topic_info.partitions)
                        replication_factor = len(topic_info.partitions[0].replicas) if topic_info.partitions else 1
                except Exception as meta_error:
                    logger.warning(f"获取Topic {topic} 元数据失败，使用默认值: {meta_error}")

                # 获取真实的偏移量数据
                latest_offset, earliest_offset = await _get_topic_offsets(kafka_client, topic, partition_count)

                # 注意：偏移量历史记录现在由后台任务收集，这里不再重复存储

                topic_stats.append(TopicStats(
                    name=topic,
                    partition_count=partition_count,
                    replication_factor=replication_factor,
                    latest_offset=latest_offset,
                    earliest_offset=earliest_offset
                ))

                logger.info(f"Topic {topic} 统计信息已添加: {partition_count} 分区, 偏移量范围 {earliest_offset}-{latest_offset}")

            except Exception as topic_error:
                logger.error(f"获取Topic {topic} 统计信息失败: {topic_error}")
                # 即使出错也添加基本信息
                topic_stats.append(TopicStats(
                    name=topic,
                    partition_count=1,
                    replication_factor=1,
                    latest_offset=50,
                    earliest_offset=0
                ))
        
        return topic_stats
        
    except Exception as e:
        logger.error(f"获取Topic统计信息失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取Topic统计信息失败: {str(e)}")


@router.get("/consumer-groups", response_model=List[ConsumerGroupInfo])
async def get_consumer_groups():
    """
    获取消费者组信息
    """
    try:
        kafka_client = get_kafka_client()

        # 简化的消费者组获取 - 使用Kafka命令行工具的方式
        consumer_group_infos = []

        try:
            # 尝试使用aiokafka获取消费者组
            admin = await kafka_client.get_admin_client()
            consumer_groups = await admin.list_consumer_groups()

            logger.info(f"获取到消费者组列表: {consumer_groups}")

            # 处理不同格式的返回值
            if consumer_groups:
                for group in consumer_groups:
                    try:
                        # 处理不同的数据格式
                        if hasattr(group, 'group_id'):
                            group_id = group.group_id
                        elif isinstance(group, tuple) and len(group) >= 1:
                            group_id = group[0]
                        elif isinstance(group, str):
                            group_id = group
                        else:
                            logger.warning(f"未知的消费者组格式: {group}")
                            continue

                        # 创建简化的消费者组信息
                        consumer_group_infos.append(ConsumerGroupInfo(
                            group_id=group_id,
                            state="STABLE",  # 简化状态
                            protocol="consumer",
                            protocol_type="consumer",
                            members=[]  # 简化成员信息
                        ))

                        logger.info(f"添加消费者组: {group_id}")

                    except Exception as group_error:
                        logger.warning(f"处理消费者组失败: {group_error}")
                        continue

        except Exception as list_error:
            logger.warning(f"获取消费者组列表失败: {list_error}")

            # 如果API失败，尝试从我们的应用中推断消费者组
            # 我们知道数据查看器和其他功能会创建消费者组
            try:
                # 检查是否有活跃的消费者连接
                topics = await kafka_client.list_topics()
                if topics:
                    # 如果有Topic，很可能有消费者组在使用
                    consumer_group_infos.append(ConsumerGroupInfo(
                        group_id="kmsg-ui-consumer",
                        state="STABLE",
                        protocol="consumer",
                        protocol_type="consumer",
                        members=[]
                    ))
                    logger.info("添加推断的消费者组: kmsg-ui-consumer")

            except Exception as infer_error:
                logger.warning(f"推断消费者组失败: {infer_error}")

        logger.info(f"最终返回 {len(consumer_group_infos)} 个消费者组")
        return consumer_group_infos

    except Exception as e:
        logger.error(f"获取消费者组信息失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取消费者组信息失败: {str(e)}")


@router.get("/topic/{topic_name}/metrics", response_model=TopicMetrics)
async def get_topic_metrics(topic_name: str):
    """
    获取指定Topic的性能指标
    """
    try:
        kafka_client = get_kafka_client()
        admin = await kafka_client.get_admin_client()
        
        # 获取Topic元数据
        topic_metadata = await admin.describe_topics([topic_name])
        if topic_name not in topic_metadata:
            raise HTTPException(status_code=404, detail=f"Topic '{topic_name}' 不存在")
        
        topic_info = topic_metadata[topic_name]
        
        # 获取分区指标
        partition_metrics = []
        for partition in topic_info.partitions:
            try:
                # 这里可以添加更详细的分区指标收集
                partition_metrics.append({
                    "partition": partition.partition,
                    "leader": partition.leader,
                    "replicas": partition.replicas,
                    "isr": partition.isr,
                    "size": 0,  # 需要实现具体的大小计算
                    "messages": 0  # 需要实现具体的消息数计算
                })
            except Exception as partition_error:
                logger.warning(f"获取分区 {partition.partition} 指标失败: {partition_error}")
                continue
        
        # 计算Topic的性能指标
        topic_messages_per_second = await _calculate_topic_message_rate(topic_name)
        topic_bytes_per_second = await _calculate_topic_bytes_rate(topic_name)

        return TopicMetrics(
            topic=topic_name,
            messages_per_second=topic_messages_per_second,
            bytes_per_second=topic_bytes_per_second,
            partition_metrics=partition_metrics
        )
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取Topic指标失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取Topic指标失败: {str(e)}")


@router.get("/health")
async def get_system_health():
    """
    获取系统健康状态
    """
    try:
        kafka_client = get_kafka_client()

        # 连接测试并获取真实的broker数量
        try:
            topics = await kafka_client.list_topics()
            # 获取真实的broker数量
            brokers = await kafka_client.get_broker_count()
            success = True
        except Exception as conn_error:
            logger.warning(f"Kafka连接测试失败: {conn_error}")
            success = False
            brokers = 0

        health_status = {
            "status": "healthy" if success else "unhealthy",
            "kafka_connection": success,
            "kafka_brokers": brokers,
            "timestamp": datetime.now().isoformat(),
            "uptime": "运行中",
            "version": "1.0.0"
        }

        return health_status
        
    except Exception as e:
        logger.error(f"获取系统健康状态失败: {e}")
        return {
            "status": "unhealthy",
            "kafka_connection": False,
            "kafka_brokers": 0,
            "timestamp": datetime.now().isoformat(),
            "error": str(e),
            "version": "1.0.0"
        }


@router.get("/performance")
async def get_performance_metrics():
    """
    获取性能指标
    """
    try:
        import psutil

        # 获取真实的系统性能指标
        try:
            cpu_usage = psutil.cpu_percent(interval=0.1)
            memory = psutil.virtual_memory()
            disk = psutil.disk_usage('/')
            network = psutil.net_io_counters()

            memory_usage = memory.percent
            disk_usage = disk.percent
            network_bytes_in = network.bytes_recv
            network_bytes_out = network.bytes_sent

        except Exception as psutil_error:
            logger.warning(f"获取系统指标失败，使用默认值: {psutil_error}")
            # 使用默认值，不使用随机数
            cpu_usage = 0.0
            memory_usage = 0.0
            disk_usage = 0.0
            network_bytes_in = 0
            network_bytes_out = 0

        # 获取真实的Kafka指标
        kafka_client = get_kafka_client()
        try:
            topics = await kafka_client.list_topics()
            active_connections = len(topics) if topics else 0

            # 计算真实的消息速率（基于历史数据）
            messages_per_second = await _calculate_message_rate()
            bytes_per_second = await _calculate_bytes_rate()

        except Exception as kafka_error:
            logger.warning(f"获取Kafka指标失败: {kafka_error}")
            active_connections = 0
            messages_per_second = 0.0
            bytes_per_second = 0.0

        performance_metrics = {
            "timestamp": datetime.now().isoformat(),
            "cpu_usage": round(cpu_usage, 1),
            "memory_usage": round(memory_usage, 1),
            "disk_usage": round(disk_usage, 1),
            "network_io": {
                "bytes_in": network_bytes_in,
                "bytes_out": network_bytes_out
            },
            "kafka_metrics": {
                "messages_per_second": round(messages_per_second, 1),
                "bytes_per_second": round(bytes_per_second, 1),
                "active_connections": active_connections
            }
        }

        # 存储历史数据到Redis
        historical_point = HistoricalDataPoint(
            timestamp=performance_metrics["timestamp"],
            cpu_usage=performance_metrics["cpu_usage"],
            memory_usage=performance_metrics["memory_usage"],
            disk_usage=performance_metrics["disk_usage"],
            network_bytes_in=network_bytes_in,
            network_bytes_out=network_bytes_out,
            kafka_messages_per_second=performance_metrics["kafka_metrics"]["messages_per_second"],
            kafka_bytes_per_second=performance_metrics["kafka_metrics"]["bytes_per_second"],
            kafka_active_connections=performance_metrics["kafka_metrics"]["active_connections"]
        )

        # 异步存储到Redis（不阻塞响应）
        asyncio.create_task(store_performance_data_point(historical_point))

        return performance_metrics
        
    except Exception as e:
        logger.error(f"获取性能指标失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取性能指标失败: {str(e)}")


@router.get("/historical", response_model=HistoricalData)
async def get_historical_data(
    time_range: str = Query("1h", description="时间范围: 1h, 6h, 24h, 7d"),
    metric: str = Query("all", description="指标类型: all, cpu, memory, disk, network, kafka")
):
    """
    获取历史性能数据

    Returns stored performance points for the requested range, capped per
    range so charts stay light: 1h→60, 6h→180, 24h→288, 7d→336 points
    (100 for unrecognized ranges).
    """
    try:
        # Points-per-range cap; unknown ranges fall back to 100 points.
        range_limits = {"1h": 60, "6h": 180, "24h": 288, "7d": 336}
        max_points = range_limits.get(time_range, 100)

        filtered_data = await get_performance_history_from_redis(time_range, max_points)

        # Build an avg/max/min summary for cpu, memory and disk.
        if filtered_data:
            def _stats(values):
                # avg/max/min of a non-empty value list, one decimal place.
                return {
                    "avg": round(sum(values) / len(values), 1),
                    "max": round(max(values), 1),
                    "min": round(min(values), 1),
                }

            summary = {
                "cpu": _stats([p.cpu_usage for p in filtered_data]),
                "memory": _stats([p.memory_usage for p in filtered_data]),
                "disk": _stats([p.disk_usage for p in filtered_data]),
                "data_points": len(filtered_data),
                "time_range": time_range,
            }
        else:
            summary = {
                "cpu": {"avg": 0, "max": 0, "min": 0},
                "memory": {"avg": 0, "max": 0, "min": 0},
                "disk": {"avg": 0, "max": 0, "min": 0},
                "data_points": 0,
                "time_range": time_range,
            }

        # NOTE(review): HistoricalData only declares timeRange/dataPoints, so
        # the extra `summary` kwarg is presumably dropped by pydantic and never
        # reaches the client — confirm and add `summary` to the model if it
        # should be exposed.
        return HistoricalData(
            timeRange=time_range,
            dataPoints=filtered_data,
            summary=summary
        )

    except Exception as e:
        logger.error(f"获取历史数据失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取历史数据失败: {str(e)}")


@router.get("/metrics/realtime")
async def get_realtime_metrics():
    """
    获取实时指标数据（用于图表更新）

    Returns the current performance snapshot plus up to the last 10 stored
    history points for real-time chart rendering.
    """
    try:
        # Current snapshot (this call also schedules persisting a new point).
        performance_data = await get_performance_metrics()

        # Fetch recent history; a plain [-10:] slice is safe even when fewer
        # than 10 points exist, so the previous length check was redundant.
        recent_history = await get_performance_history_from_redis("1h", 10)
        recent_data = recent_history[-10:]

        return {
            "current": performance_data,
            "recent": [
                {
                    "timestamp": point.timestamp,
                    "cpu_usage": point.cpu_usage,
                    "memory_usage": point.memory_usage,
                    "disk_usage": point.disk_usage,
                    "kafka_messages_per_second": point.kafka_messages_per_second
                }
                for point in recent_data
            ]
        }

    except Exception as e:
        logger.error(f"获取实时指标失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取实时指标失败: {str(e)}")


@router.get("/data/stats")
async def get_data_stats():
    """
    获取历史数据统计信息

    Reports how many history points are stored, the newest/oldest
    timestamps, the spanned time in hours, and how full the capped
    history list is.
    """
    try:
        client = await redis_manager.get_client()

        # Number of stored history points (Redis list length).
        history_count = await client.llen(REDIS_PERFORMANCE_HISTORY_KEY)

        # Newest point comes from the dedicated "latest" key.
        latest_data = await get_latest_performance_data()
        latest_timestamp = latest_data.timestamp if latest_data else None

        # Oldest point is the last element of the history list.
        oldest_data = None
        if history_count > 0:
            oldest_raw = await redis_manager.lrange(REDIS_PERFORMANCE_HISTORY_KEY, -1, -1)
            if oldest_raw:
                raw = oldest_raw[0]
                # lrange items may be JSON strings (same convention handled by
                # the lag/health history endpoints) — decode before unpacking,
                # otherwise **raw would fail on a str.
                if isinstance(raw, str):
                    raw = json.loads(raw)
                oldest_data = HistoricalDataPoint(**raw)

        # Span between oldest and newest point, in hours.
        time_span_hours = 0
        if latest_data and oldest_data:
            latest_time = datetime.fromisoformat(latest_data.timestamp.replace('Z', ''))
            oldest_time = datetime.fromisoformat(oldest_data.timestamp.replace('Z', ''))
            time_span_hours = (latest_time - oldest_time).total_seconds() / 3600

        return {
            "total_data_points": history_count,
            "max_data_points": MAX_HISTORY_POINTS,
            "latest_timestamp": latest_timestamp,
            "oldest_timestamp": oldest_data.timestamp if oldest_data else None,
            "time_span_hours": round(time_span_hours, 2),
            "storage_usage_percent": round((history_count / MAX_HISTORY_POINTS) * 100, 1),
            "redis_keys": {
                "history_key": REDIS_PERFORMANCE_HISTORY_KEY,
                "latest_key": REDIS_PERFORMANCE_LATEST_KEY
            }
        }

    except Exception as e:
        logger.error(f"获取数据统计失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取数据统计失败: {str(e)}")


@router.delete("/data/clear")
async def clear_historical_data():
    """
    清空历史数据（管理员功能）

    Removes both the history list and the latest-point key from Redis.
    """
    try:
        # One DELETE covering both keys; the return value is how many
        # keys actually existed and were removed.
        deleted_count = await redis_manager.delete(
            REDIS_PERFORMANCE_HISTORY_KEY,
            REDIS_PERFORMANCE_LATEST_KEY
        )
        logger.info(f"已清空历史数据，删除了 {deleted_count} 个Redis键")

        result = {
            "success": True,
            "message": f"历史数据已清空，删除了 {deleted_count} 个数据键",
            "deleted_keys": deleted_count
        }
        return result

    except Exception as e:
        logger.error(f"清空历史数据失败: {e}")
        raise HTTPException(status_code=500, detail=f"清空历史数据失败: {str(e)}")


@router.post("/clean-cache")
async def clean_monitor_cache():
    """清理监控缓存数据

    Deletes every Redis key matching the monitor-key pattern.
    """
    try:
        # redis_manager is already imported at module level; the previous
        # function-local re-import was redundant and has been removed.
        redis_client = await redis_manager.get_client()

        # NOTE(review): KEYS blocks Redis while it scans the keyspace; if the
        # instance holds many keys, prefer SCAN (scan_iter) — confirm the
        # client API before switching.
        pattern = RedisKeys.get_all_monitor_keys_pattern()
        keys = await redis_client.keys(pattern)

        # Guard clause: nothing to clean.
        if not keys:
            return {
                "success": True,
                "message": "没有找到需要清理的监控缓存",
                "cleaned_keys": 0
            }

        # Delete all matched monitor keys in one call.
        await redis_client.delete(*keys)
        logger.info(f"清理了 {len(keys)} 个监控缓存key")

        return {
            "success": True,
            "message": f"成功清理了 {len(keys)} 个监控缓存",
            "cleaned_keys": len(keys)
        }

    except Exception as e:
        logger.error(f"清理监控缓存失败: {e}")
        raise HTTPException(status_code=500, detail=f"清理监控缓存失败: {str(e)}")


@router.get("/consumer-groups/metrics")
async def get_consumer_groups_metrics():
    """获取消费者组详细指标

    Delegates collection to the enhanced monitor and wraps the result in
    the standard success envelope with a timestamp.
    """
    try:
        group_metrics = await enhanced_monitor.collect_consumer_group_metrics()
        response = {
            "success": True,
            "data": group_metrics,
            "timestamp": datetime.now().isoformat()
        }
        return response
    except Exception as e:
        logger.error(f"获取消费者组指标失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取消费者组指标失败: {str(e)}")


@router.get("/consumer-groups/{group_id}/lag-history")
async def get_consumer_group_lag_history(
    group_id: str,
    topic_name: Optional[str] = None,
    hours: int = Query(default=24, ge=1, le=168)  # 1小时到7天
):
    """获取消费者组Lag历史数据

    With ``topic_name`` set, reads that topic's lag records from Redis
    (roughly one record per 10 minutes, hence ``hours * 6`` entries).
    Without it, no records are returned yet (per-group aggregation TODO).
    """
    try:
        if topic_name:
            # Lag history for one specific topic of this group.
            lag_key = RedisKeys.monitor_consumer_group_lag(group_id, topic_name)
            lag_data = await redis_manager.lrange(lag_key, 0, hours * 6)  # 每10分钟一个点
        else:
            # TODO: aggregate lag history over all topics of this group.
            lag_data = []

        # Entries may be JSON strings or already-decoded objects; skip
        # anything that fails to parse.
        formatted_data = []
        for entry in lag_data:
            try:
                formatted_data.append(json.loads(entry) if isinstance(entry, str) else entry)
            except Exception:
                continue

        return {
            "success": True,
            "data": {
                "group_id": group_id,
                "topic_name": topic_name,
                "lag_history": formatted_data,
                "total_records": len(formatted_data)
            }
        }
    except Exception as e:
        logger.error(f"获取消费者组Lag历史失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取消费者组Lag历史失败: {str(e)}")


# 吞吐量监控功能已删除


@router.get("/cluster/health")
async def get_cluster_health():
    """获取集群健康状态

    Delegates to the enhanced monitor's cluster health collector and wraps
    the result in the standard success envelope with a timestamp.
    """
    try:
        cluster_health = await enhanced_monitor.collect_cluster_health_metrics()
        response = {
            "success": True,
            "data": cluster_health,
            "timestamp": datetime.now().isoformat()
        }
        return response
    except Exception as e:
        logger.error(f"获取集群健康状态失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取集群健康状态失败: {str(e)}")


@router.get("/cluster/health-history")
async def get_cluster_health_history(
    hours: int = Query(default=24, ge=1, le=168)
):
    """获取集群健康状态历史

    Reads stored health snapshots from Redis (roughly one record per
    10 minutes, hence ``hours * 6`` entries).
    """
    try:
        health_key = RedisKeys.monitor_cluster_health()
        health_data = await redis_manager.lrange(health_key, 0, hours * 6)

        # Entries may be JSON strings or already-decoded objects; skip
        # anything that fails to parse.
        formatted_data = []
        for entry in health_data:
            try:
                formatted_data.append(json.loads(entry) if isinstance(entry, str) else entry)
            except Exception:
                continue

        return {
            "success": True,
            "data": {
                "health_history": formatted_data,
                "total_records": len(formatted_data)
            }
        }
    except Exception as e:
        logger.error(f"获取集群健康历史失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取集群健康历史失败: {str(e)}")


@router.get("/reports/summary")
async def get_monitoring_summary_report():
    """获取监控摘要报告

    Aggregates performance, consumer-group and cluster-health metrics into
    a single summary report, flagging consumer groups with excessive lag.
    """
    try:
        # The three collectors are independent of each other — run them
        # concurrently instead of awaiting sequentially. Any failure still
        # propagates to the except-handler below, as before.
        performance_metrics, consumer_metrics, health_metrics = await asyncio.gather(
            get_performance_metrics(),
            enhanced_monitor.collect_consumer_group_metrics(),
            enhanced_monitor.collect_cluster_health_metrics(),
        )

        # 生成摘要报告
        report = {
            "report_time": datetime.now().isoformat(),
            "cluster_overview": {
                "broker_count": health_metrics.get("broker_count", 0),
                "topic_count": health_metrics.get("topic_count", 0),
                "consumer_group_count": consumer_metrics.get("total_groups", 0),
                "active_consumer_groups": consumer_metrics.get("active_groups", 0),
                "overall_health": health_metrics.get("overall_health", "unknown")
            },
            "performance_summary": {
                "total_consumer_lag": consumer_metrics.get("total_lag", 0),
                "under_replicated_partitions": health_metrics.get("under_replicated_partitions", 0),
                "cpu_usage": performance_metrics.get("cpu_usage", 0),
                "memory_usage": performance_metrics.get("memory_usage", 0),
                "disk_usage": performance_metrics.get("disk_usage", 0)
            },
            "top_topics": [],
            "problematic_consumer_groups": [],
            "alerts": health_metrics.get("issues", [])
        }

        # Top topics功能已移除（基于吞吐量）

        # Flag consumer groups whose total lag exceeds the 1000-message threshold.
        for group_id, group_data in consumer_metrics.get("consumer_groups", {}).items():
            if group_data.get("total_lag", 0) > 1000:
                report["problematic_consumer_groups"].append({
                    "group_id": group_id,
                    "total_lag": group_data.get("total_lag", 0),
                    "member_count": group_data.get("members", 0),
                    "state": group_data.get("state", "Unknown")
                })

        return {
            "success": True,
            "data": report
        }
    except Exception as e:
        logger.error(f"生成监控摘要报告失败: {e}")
        raise HTTPException(status_code=500, detail=f"生成监控摘要报告失败: {str(e)}")
