import json
import logging
import time
from typing import Any, Dict, Optional

import pandas as pd
from kafka import KafkaConsumer
from kafka.errors import KafkaError
from sqlalchemy import create_engine

# 配置日志
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("ExceptionConsumer")


class ExceptionConsumer:
    """
    Consume exception events from Kafka and persist them to a database.

    Ingestion side of the monitoring system: messages from the exception
    topics are decoded, cleaned, buffered in memory, and bulk-inserted
    into the ``system_exceptions`` table.
    """

    # Defaults kept as immutable class-level tuples so a shared mutable
    # default argument can never leak state between instances.
    _DEFAULT_SERVERS = ("kafka-broker:9092",)
    _DEFAULT_TOPICS = (
        "system-exceptions-critical",
        "system-exceptions-error",
        "system-exceptions-warning",
        "system-exceptions-info",
    )

    def __init__(
        self,
        bootstrap_servers: Optional[list] = None,
        topics: Optional[list] = None,
        db_connection: str = "postgresql://user:password@db-host:5432/monitoring_db",
    ):
        """
        Initialize the exception consumer.

        Args:
            bootstrap_servers: Kafka broker addresses; defaults to
                ``["kafka-broker:9092"]``.
            topics: Topics to subscribe to; defaults to the four
                ``system-exceptions-*`` severity topics.
            db_connection: SQLAlchemy database URL. The default holds
                placeholder credentials — override it in real deployments
                (never ship real credentials in source).
        """
        # None-sentinel pattern replaces the original mutable default
        # arguments ([] defaults are shared across all calls).
        self.bootstrap_servers = (
            list(self._DEFAULT_SERVERS) if bootstrap_servers is None else bootstrap_servers
        )
        self.topics = list(self._DEFAULT_TOPICS) if topics is None else topics
        self.db_connection = db_connection

        # Kafka consumer subscribed to all configured topics.
        self.consumer = self._init_consumer()

        # SQLAlchemy engine used for batched inserts.
        self.db_engine = self._init_db_engine()

        # Batching parameters.
        self.batch_size = 100  # flush once this many messages are buffered
        self.batch_data = []  # in-memory buffer of cleaned payloads
        self.batch_interval = 5  # max seconds between flushes (see consume())
        self.last_batch_time = time.time()

    def _init_consumer(self) -> "KafkaConsumer":
        """Create the Kafka consumer; log and re-raise on failure."""
        try:
            return KafkaConsumer(
                *self.topics,
                bootstrap_servers=self.bootstrap_servers,
                auto_offset_reset="latest",  # start from the newest messages
                # NOTE(review): auto-commit combined with batched DB writes
                # means a crash between offset commit and flush can drop
                # messages; switch to manual commits if at-least-once
                # delivery is required — confirm the delivery contract.
                enable_auto_commit=True,
                group_id="exception-monitoring-group",
                value_deserializer=lambda m: json.loads(m.decode("utf-8")),
            )
        except KafkaError as e:
            # logger.exception records the traceback; lazy %-args avoid
            # formatting cost when the level is disabled.
            logger.exception("Kafka消费者初始化失败: %s", e)
            raise

    def _init_db_engine(self) -> Any:
        """Create the SQLAlchemy engine; log and re-raise on failure."""
        try:
            return create_engine(self.db_connection)
        except Exception as e:
            logger.exception("数据库连接初始化失败: %s", e)
            raise

    def _clean_exception_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Normalize one exception payload for database storage.

        Works on a shallow copy so the caller's dict is never mutated.

        Args:
            data: Raw exception payload decoded from Kafka.

        Returns:
            A cleaned copy safe to insert into ``system_exceptions``.
        """
        data = dict(data)  # do not mutate the caller's payload in place

        # Truncate oversized stack traces to avoid DB column overflow;
        # the isinstance guard also skips non-string values instead of
        # crashing on len()/slicing.
        trace = data.get("stack_trace")
        if isinstance(trace, str) and len(trace) > 8000:
            data["stack_trace"] = trace[:8000] + "...[truncated]"

        # Serialize nested context dicts so the column stores JSON text.
        if isinstance(data.get("context"), dict):
            data["context"] = json.dumps(data["context"])

        return data

    def _save_to_database(self, data_batch: list) -> None:
        """
        Bulk-insert a batch of exception records into ``system_exceptions``.

        Failures are logged but not re-raised so one bad batch cannot kill
        the consumer loop — note this means the failed batch is dropped.

        Args:
            data_batch: List of cleaned exception payloads; no-op if empty.
        """
        if not data_batch:
            return

        try:
            df = pd.DataFrame(data_batch)
            df.to_sql(
                name="system_exceptions",  # target table
                con=self.db_engine,
                if_exists="append",  # never overwrite existing rows
                index=False,
                chunksize=1000,
            )
            logger.info("成功保存 %d 条异常数据到数据库", len(data_batch))
        except Exception as e:
            logger.exception("保存异常数据到数据库失败: %s", e)
            # TODO(review): add retry / dead-letter handling; with
            # auto-commit enabled these records are otherwise lost.

    def _flush_batch(self) -> None:
        """Write the buffered batch to the database and reset the buffer."""
        self._save_to_database(self.batch_data)
        self.batch_data = []
        self.last_batch_time = time.time()

    def consume(self, run_forever: bool = True) -> None:
        """
        Consume exception messages from Kafka and persist them in batches.

        The buffer is flushed when it reaches ``batch_size`` or when
        ``batch_interval`` seconds have elapsed. NOTE(review): the
        time-based flush only fires when a new message arrives, because
        the Kafka iterator blocks while the topics are idle.

        Args:
            run_forever: If False, stop after processing one message.
        """
        logger.info("开始消费异常数据...")

        try:
            for message in self.consumer:
                try:
                    cleaned = self._clean_exception_data(message.value)
                    self.batch_data.append(cleaned)

                    # Flush on either the size or the elapsed-time threshold.
                    if (
                        len(self.batch_data) >= self.batch_size
                        or time.time() - self.last_batch_time >= self.batch_interval
                    ):
                        self._flush_batch()

                except Exception as e:
                    # One malformed message must not stop the loop.
                    logger.exception("处理消息时发生错误: %s", e)
                    continue

                if not run_forever:
                    break

        except KeyboardInterrupt:
            logger.info("用户中断，停止消费")
        except Exception as e:
            logger.exception("消费过程中发生错误: %s", e)
        finally:
            # Persist whatever is still buffered before shutting down.
            self._flush_batch()
            logger.info("停止消费异常数据")


if __name__ == "__main__":
    # Example local configuration — replace with real settings when deploying.
    exception_consumer = ExceptionConsumer(
        db_connection="postgresql://monitor:password@localhost:5432/monitoring",
        bootstrap_servers=["localhost:9092"],
    )
    exception_consumer.consume(run_forever=True)
