"""
后台任务模块
用于定期收集监控数据和维护系统状态
"""

import asyncio
import json
from datetime import datetime
from typing import Dict, Any
import logging

from app.core.kafka_client import get_kafka_client
from app.core.redis_client import redis_manager
from app.core.redis_keys import RedisKeys
from app.core.enhanced_monitor import enhanced_monitor
from app.utils.logger import get_logger

logger = get_logger(__name__)

class BackgroundTaskManager:
    """后台任务管理器"""
    
    def __init__(self):
        """Initialize the manager in a stopped state with no registered tasks."""
        # Maps task name -> running asyncio.Task; populated by start().
        self.tasks: Dict[str, asyncio.Task] = {}
        # Flag polled by every worker loop to know when to exit.
        self.running = False
    
    async def start(self):
        """Spawn every background worker task.

        Idempotent: returns immediately if the manager is already running.
        """
        if self.running:
            return

        self.running = True
        logger.info("启动后台任务管理器")

        # Registry key paired with the coroutine function that drives it;
        # tasks are created in this order.
        workers = (
            ('monitor_collector', self._monitor_data_collector),    # offset/system metrics
            ('enhanced_monitor', self._enhanced_monitor_collector),  # enhanced monitor metrics
            ('data_cleaner', self._data_cleaner),                    # hourly data pruning
            ('consumer_group_cleaner', self._consumer_group_cleaner),  # daily group cleanup
        )
        for task_name, coro_fn in workers:
            self.tasks[task_name] = asyncio.create_task(coro_fn())

        logger.info(f"已启动 {len(self.tasks)} 个后台任务")
    
    async def stop(self):
        """Cancel all background tasks and wait for each one to finish.

        Idempotent: returns immediately if the manager is not running.
        """
        if not self.running:
            return

        self.running = False
        logger.info("停止后台任务管理器")

        # Cancel each still-live task and await its termination so that
        # no task outlives the manager.
        for task_name, task in self.tasks.items():
            if task.done():
                continue
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                logger.info(f"后台任务 {task_name} 已取消")

        self.tasks.clear()
        logger.info("所有后台任务已停止")
    
    async def _monitor_data_collector(self):
        """Worker loop: collect topic offsets and system metrics every 10s.

        On unexpected errors the loop backs off for 30s before retrying;
        cancellation terminates the loop.
        """
        logger.info("启动监控数据收集任务")

        while self.running:
            try:
                for collect_step in (self._collect_topic_offsets,
                                     self._collect_system_metrics):
                    await collect_step()

                # Normal cadence: one full collection pass every 10 seconds.
                await asyncio.sleep(10)

            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"监控数据收集失败: {e}")
                await asyncio.sleep(30)  # back off before retrying after a failure
    
    async def _collect_topic_offsets(self):
        """Collect and persist latest/earliest offsets for every user topic.

        Internal topics (names starting with '__') are skipped; per-topic
        failures are logged and do not abort the pass.
        """
        try:
            kafka_client = get_kafka_client()

            for topic in await kafka_client.list_topics():
                try:
                    # Internal Kafka topics are not monitored.
                    if topic.startswith('__'):
                        logger.debug(f"跳过内部Topic: {topic}")
                        continue

                    # Determine the partition count, falling back to 1 when
                    # the metadata lookup fails.
                    try:
                        partitions = await kafka_client.get_topic_partitions(topic)
                        partition_count = len(partitions) if partitions else 1
                    except Exception as partition_error:
                        logger.warning(f"获取Topic {topic} 分区信息失败: {partition_error}")
                        partition_count = 1

                    latest_offset, earliest_offset = await self._get_topic_offsets(
                        kafka_client, topic, partition_count
                    )

                    # Persist one history record for this topic.
                    await self._store_topic_offset_history(topic, latest_offset, earliest_offset)

                    logger.info(f"收集Topic {topic} 偏移量: latest={latest_offset}, earliest={earliest_offset}")

                except Exception as e:
                    # A topic may vanish between listing and querying; that
                    # is expected and only logged at debug level.
                    err_text = str(e)
                    if "UnknownTopicOrPartitionError" in err_text or "UnknownTopicOrPartition" in err_text:
                        logger.debug(f"Topic {topic} 不存在或已被删除，跳过偏移量收集")
                    else:
                        logger.warning(f"收集Topic {topic} 偏移量失败: {e}")

        except Exception as e:
            logger.error(f"收集Topic偏移量数据失败: {e}")
    
    async def _get_topic_offsets(self, kafka_client, topic: str, partition_count: int) -> tuple[int, int]:
        """获取Topic的偏移量信息"""
        try:
            total_latest = 0
            total_earliest = 0
            
            for partition in range(partition_count):
                try:
                    # 获取分区的最新和最早偏移量
                    latest = await kafka_client.get_partition_latest_offset(topic, partition)
                    earliest = await kafka_client.get_partition_earliest_offset(topic, partition)
                    
                    total_latest += latest
                    total_earliest += earliest
                    
                except Exception as e:
                    # 检查是否是Topic不存在的错误
                    if "UnknownTopicOrPartitionError" in str(e) or "UnknownTopicOrPartition" in str(e):
                        logger.debug(f"Topic {topic} 分区 {partition} 不存在或已被删除")
                        break  # 如果Topic被删除，跳出分区循环
                    else:
                        logger.warning(f"获取Topic {topic} 分区 {partition} 偏移量失败: {e}")
            
            return total_latest, total_earliest
            
        except Exception as e:
            logger.error(f"获取Topic {topic} 偏移量失败: {e}")
            return 0, 0
    
    async def _store_topic_offset_history(self, topic_name: str, latest_offset: int, earliest_offset: int):
        """Push one offset snapshot for *topic_name* onto its Redis history list.

        Newest record is kept at the head of the list; the list is capped at
        100 entries and expires 24 hours after the last write.
        """
        try:
            # Serialize the snapshot up front.
            record = json.dumps({
                'timestamp': datetime.now().isoformat(),
                'latest_offset': latest_offset,
                'earliest_offset': earliest_offset,
                'message_count': latest_offset - earliest_offset,
            })

            history_key = RedisKeys.monitor_topic_offsets(topic_name)
            redis_client = await redis_manager.get_client()

            # Newest-first list, bounded to 100 entries, with a 24h TTL.
            await redis_client.lpush(history_key, record)
            await redis_client.ltrim(history_key, 0, 99)
            await redis_client.expire(history_key, 24 * 3600)

            logger.debug(f"存储Topic {topic_name} 偏移量历史: latest={latest_offset}, earliest={earliest_offset}")

        except Exception as e:
            logger.error(f"存储Topic {topic_name} 偏移量历史失败: {e}")
    
    async def _collect_system_metrics(self):
        """Sample host CPU/memory/disk usage and store one snapshot in Redis.

        Keeps the newest 100 samples (newest first) with a 24h TTL.  Any
        failure (psutil missing, Redis down) is logged as a warning and
        swallowed so the collector loop keeps running.
        """
        try:
            import psutil

            # BUG FIX: psutil.cpu_percent(interval=1) sleeps for a full
            # second, which previously blocked the event loop — and every
            # other background task — on each 10s collection pass.  Run the
            # blocking sample in a worker thread so the loop stays responsive.
            cpu_percent = await asyncio.to_thread(psutil.cpu_percent, 1)
            memory = psutil.virtual_memory()
            disk = psutil.disk_usage('/')

            system_metrics = {
                'timestamp': datetime.now().isoformat(),
                'cpu_percent': cpu_percent,
                'memory_percent': memory.percent,
                'memory_used': memory.used,
                'memory_total': memory.total,
                'disk_percent': disk.percent,
                'disk_used': disk.used,
                'disk_total': disk.total
            }

            # Newest-first list, bounded to 100 entries, with a 24h TTL.
            metrics_key = RedisKeys.monitor_system_metrics()
            redis_client = await redis_manager.get_client()
            await redis_client.lpush(metrics_key, json.dumps(system_metrics))
            await redis_client.ltrim(metrics_key, 0, 99)
            await redis_client.expire(metrics_key, 24 * 3600)

            logger.debug(f"收集系统指标: CPU={cpu_percent}%, Memory={memory.percent}%, Disk={disk.percent}%")

        except Exception as e:
            logger.warning(f"收集系统指标失败: {e}")

    async def _enhanced_monitor_collector(self):
        """Worker loop driving the enhanced monitor.

        Each ~30-second cycle collects consumer-group metrics, waits 20s,
        collects cluster-health metrics, then waits another 10s.  Unexpected
        errors cause a 30s back-off before retrying; cancellation ends the
        loop.
        """
        logger.info("启动增强监控数据收集任务")

        while self.running:
            try:
                # Consumer-group metrics: once per ~30s cycle.
                await enhanced_monitor.collect_consumer_group_metrics()
                logger.debug("收集消费者组指标完成")

                await asyncio.sleep(20)  # stagger the two collections within the cycle

                # Cluster-health metrics: once per cycle, 20s after the above.
                await enhanced_monitor.collect_cluster_health_metrics()
                logger.debug("收集集群健康指标完成")

                await asyncio.sleep(10)

            except asyncio.CancelledError:
                logger.info("增强监控数据收集任务被取消")
                break
            except Exception as e:
                logger.error(f"增强监控数据收集任务异常: {e}")
                await asyncio.sleep(30)  # back off before retrying after a failure

    async def _data_cleaner(self):
        """Hourly maintenance loop that prunes stale monitoring data.

        Errors are logged and the loop retries an hour later; cancellation
        terminates the loop.
        """
        logger.info("启动数据清理任务")

        while self.running:
            try:
                await self._clean_old_data()
                await asyncio.sleep(3600)  # run once per hour
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"数据清理失败: {e}")
                await asyncio.sleep(3600)
    
    async def _clean_old_data(self):
        """Sweep monitoring keys in Redis.

        Ensures every monitoring key carries a TTL (24h default) and purges
        offset-history lists that contain inconsistently-encoded records.
        Per-key failures are logged and skipped.
        """
        try:
            pattern = RedisKeys.get_all_monitor_keys_pattern()
            redis_client = await redis_manager.get_client()

            cleaned_count = 0
            # IMPROVEMENT: iterate with SCAN instead of KEYS — KEYS is O(N)
            # over the whole keyspace and blocks the Redis server while it
            # runs; scan_iter walks the keyspace incrementally.
            async for key in redis_client.scan_iter(match=pattern):
                try:
                    # TTL of -1 means the key exists but never expires;
                    # give it the standard 24h lifetime.
                    ttl = await redis_client.ttl(key)
                    if ttl == -1:
                        await redis_client.expire(key, 24 * 3600)
                        cleaned_count += 1

                    # Offset-history lists may contain double-encoded
                    # records from older versions; purge those.
                    key_str = key.decode('utf-8') if isinstance(key, bytes) else key
                    if key_str.endswith(":offsets"):
                        await self._clean_inconsistent_offset_data(redis_client, key_str)

                except Exception as e:
                    logger.warning(f"清理key {key} 失败: {e}")

            if cleaned_count > 0:
                logger.info(f"数据清理完成，处理了 {cleaned_count} 个key")

        except Exception as e:
            logger.error(f"数据清理失败: {e}")

    async def _clean_inconsistent_offset_data(self, redis_client, key: str):
        """清理格式不一致的偏移量数据"""
        try:
            # 获取所有数据
            all_data = await redis_client.lrange(key, 0, -1)

            if not all_data:
                return

            # 检查是否有格式不一致的数据
            has_inconsistent = False
            for data in all_data:
                if isinstance(data, str) and '\\"' in data:
                    has_inconsistent = True
                    break

            if has_inconsistent:
                logger.info(f"发现格式不一致的数据，清理key: {key}")
                # 删除整个key，让后台任务重新收集
                await redis_client.delete(key)

        except Exception as e:
            logger.warning(f"清理不一致数据失败 {key}: {e}")

    async def _consumer_group_cleaner(self):
        """Daily worker loop that removes stale system-created consumer groups.

        Sleeps first so the cleanup runs 24h after startup and then daily;
        on error it retries an hour later.
        """
        logger.info("启动消费者组清理任务")

        while self.running:
            try:
                # Wait a full day between cleanup passes.
                await asyncio.sleep(24 * 60 * 60)
                if not self.running:
                    break

                logger.info("开始清理过期消费者组")
                await self._cleanup_expired_consumer_groups()

            except asyncio.CancelledError:
                logger.info("消费者组清理任务被取消")
                break
            except Exception as e:
                logger.error(f"消费者组清理任务异常: {e}")
                await asyncio.sleep(60 * 60)  # retry in an hour after a failure

    async def _cleanup_expired_consumer_groups(self):
        """Delete system-created consumer groups that are empty or defunct.

        Only groups whose id carries one of the known system prefixes are
        touched; per-group failures are logged and skipped.
        """
        # Prefixes identifying groups created by this system; anything else
        # is left untouched.
        system_prefixes = (
            'kmsg-ui-',         # this system's own consumers
            'temp-consumer-',   # throwaway consumers
            'data-viewer-',     # consumers spawned by the data viewer
        )

        try:
            kafka_client = get_kafka_client()
            consumer_groups = await kafka_client.list_consumer_groups()

            cleaned_count = 0
            for group_id in consumer_groups:
                try:
                    # Skip anything this system did not create.
                    if not group_id.startswith(system_prefixes):
                        continue

                    group_info = await kafka_client.get_consumer_group_info(group_id)

                    # Decide whether (and why) the group should be removed.
                    if not group_info:
                        # Listed but no longer resolvable: a zombie group.
                        delete_reason = "僵尸消费者组（不存在但仍在列表中）"
                    elif group_info.get('state') == 'Empty':
                        delete_reason = "空消费者组"
                    else:
                        continue  # group is alive — keep it

                    if await kafka_client.delete_consumer_group(group_id):
                        cleaned_count += 1
                        logger.info(f"已清理消费者组: {group_id} ({delete_reason})")
                    else:
                        logger.warning(f"清理消费者组失败: {group_id} ({delete_reason})")

                except Exception as e:
                    logger.warning(f"处理消费者组 {group_id} 时出错: {e}")

            logger.info(f"消费者组清理完成，共清理 {cleaned_count} 个过期消费者组")

        except Exception as e:
            logger.error(f"清理过期消费者组失败: {e}")


# Module-level singleton; shared by the start/stop helper functions.
background_task_manager = BackgroundTaskManager()


async def start_background_tasks():
    """Start the global background task manager (module-level entry point)."""
    return await background_task_manager.start()


async def stop_background_tasks():
    """Stop the global background task manager (module-level entry point)."""
    return await background_task_manager.stop()
