# -*- coding: utf-8 -*-
"""
Hive元数据库Binlog监控脚本
通过订阅MySQL Binlog，实时监控元数据表的变化
"""

import fnmatch
import logging
import os
import sys
import time
from datetime import datetime
from typing import Dict, Set, Optional

import pymysql
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
    DeleteRowsEvent,
    UpdateRowsEvent,
    WriteRowsEvent,
)

# 添加父目录到路径
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from common.config import config
from common.alert_manager import alert_manager, Alert, AlertLevel, AlertType

# Configure logging: level comes from config (e.g. "INFO"), output goes
# both to the configured log file and to the console.
logging.basicConfig(
    level=getattr(logging, config.log_level),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(config.log_file),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class BinlogMonitor:
    """Monitors the Hive metastore through the MySQL binlog.

    Subscribes to row events on the metastore tables (TBLS, DBS,
    TABLE_PARAMS, PARTITIONS) and raises alerts for: dropped tables,
    dropped databases, table overwrites (drop followed by re-create
    within a time window), batch deletes, and large row-count changes.
    """

    def __init__(self):
        self.config = config

        # Cache: table id -> (db_name, table_name)
        self.table_id_cache: Dict[int, tuple] = {}
        # Cache: db id -> db_name
        self.db_id_cache: Dict[int, str] = {}

        # Overwrite detection: {"db.table": unix ts of the last DELETE}
        self.recent_deletes: Dict[str, float] = {}

        # Batch-delete detection: [("db.table", unix ts), ...]
        self.recent_delete_list: list = []

        # Row-count change cache: {"db.table": {old values}} — reserved,
        # not populated by the current handlers.
        self.table_params_cache: Dict[str, Dict] = {}

    def _get_binlog_stream(self) -> "BinLogStreamReader":
        """Create a blocking binlog stream restricted to the metastore schema."""
        mysql_settings = {
            'host': self.config.mysql_host,
            'port': self.config.mysql_port,
            'user': self.config.mysql_user,
            'passwd': self.config.mysql_password
        }

        # Only watch the Hive metastore database and the tables we handle.
        return BinLogStreamReader(
            connection_settings=mysql_settings,
            server_id=self.config.binlog_server_id,
            only_events=[DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent],
            only_schemas=[self.config.mysql_database],
            only_tables=['TBLS', 'DBS', 'TABLE_PARAMS', 'PARTITIONS'],
            resume_stream=True,
            blocking=True
        )

    def _query_one(self, sql: str, params: tuple) -> Optional[tuple]:
        """Run a single-row SELECT against the metastore.

        Opens a short-lived connection and always closes it, even when
        connect/execute raises. Exceptions propagate so the caller can
        log a context-specific message.

        Returns:
            The first result row, or None when no row matches.
        """
        conn = None
        try:
            conn = pymysql.connect(
                host=self.config.mysql_host,
                port=self.config.mysql_port,
                user=self.config.mysql_user,
                password=self.config.mysql_password,
                database=self.config.mysql_database
            )
            with conn.cursor() as cursor:
                cursor.execute(sql, params)
                return cursor.fetchone()
        finally:
            if conn is not None:
                conn.close()

    def _get_table_info(self, table_id: int) -> Optional[tuple]:
        """
        Resolve a table id to its (db_name, table_name).

        Results are cached in self.table_id_cache.

        Returns:
            (db_name, table_name), or None when the table no longer
            exists in the metastore or the lookup fails.
        """
        # Cache first to avoid one metastore round-trip per event row.
        if table_id in self.table_id_cache:
            return self.table_id_cache[table_id]

        try:
            row = self._query_one(
                """
                SELECT d.NAME as db_name, t.TBL_NAME as table_name
                FROM TBLS t
                JOIN DBS d ON t.DB_ID = d.DB_ID
                WHERE t.TBL_ID = %s
                """,
                (table_id,)
            )
            if row:
                info = (row[0], row[1])
                self.table_id_cache[table_id] = info
                return info
        except Exception as e:
            logger.error(f"查询表信息失败: table_id={table_id}, error={e}")

        return None

    def _get_db_name(self, db_id: int) -> Optional[str]:
        """Resolve a database id to its name (cached in self.db_id_cache)."""
        if db_id in self.db_id_cache:
            return self.db_id_cache[db_id]

        try:
            row = self._query_one("SELECT NAME FROM DBS WHERE DB_ID = %s", (db_id,))
            if row:
                db_name = row[0]
                self.db_id_cache[db_id] = db_name
                return db_name
        except Exception as e:
            logger.error(f"查询数据库名失败: db_id={db_id}, error={e}")

        return None

    def handle_tbls_delete(self, event: "DeleteRowsEvent"):
        """Handle DELETE events on TBLS (a table was dropped).

        Records each drop for overwrite/batch-delete detection and sends
        a drop_table alert unless the table is whitelisted (and not
        blacklisted back in).
        """
        rule = self.config.get_alert_rule('drop_table')
        if not rule.get('enabled', True):
            return

        whitelist = rule.get('whitelist', [])
        blacklist = rule.get('blacklist', [])

        # Prune stale overwrite-detection entries here as well: the old
        # code only pruned on INSERT, so recent_deletes grew without
        # bound when dropped tables were never re-created.
        overwrite_window = self.config.get_alert_rule('table_overwrite').get('time_window', 300)
        cutoff = time.time() - overwrite_window
        self.recent_deletes = {k: v for k, v in self.recent_deletes.items() if v > cutoff}

        for row in event.rows:
            values = row['values']
            table_id = values.get('TBL_ID')

            # Prefer the cached/DB lookup; fall back to the event payload
            # (the row is already gone from the metastore at this point).
            table_info = self._get_table_info(table_id)
            if not table_info:
                db_id = values.get('DB_ID')
                table_name = values.get('TBL_NAME')
                # _get_db_name may return None on failure; never emit "None.x".
                db_name = (self._get_db_name(db_id) or 'unknown') if db_id else 'unknown'
            else:
                db_name, table_name = table_info

            full_table_name = f"{db_name}.{table_name}"

            # Record the drop for overwrite and batch-delete detection.
            now = time.time()
            self.recent_deletes[full_table_name] = now
            self.recent_delete_list.append((full_table_name, now))

            # Whitelist suppresses the alert unless blacklist overrides it.
            in_whitelist = self._match_pattern(full_table_name, whitelist)
            in_blacklist = self._match_pattern(full_table_name, blacklist)

            if in_whitelist and not in_blacklist:
                logger.info(f"表 {full_table_name} 在白名单中，跳过告警")
                continue

            alert = Alert(
                alert_type=AlertType.DROP_TABLE,
                level=rule.get('level', AlertLevel.SEVERE),
                database=db_name,
                table=table_name,
                user=values.get('OWNER'),
                details={
                    '表ID': table_id,
                    '操作时间': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                }
            )

            alert_manager.send_alert(alert)
            logger.warning(f"检测到删除表: {full_table_name}")

            # The id is gone from the metastore; drop it from the cache.
            self.table_id_cache.pop(table_id, None)

        # A burst of drops may indicate a batch delete.
        self.check_batch_delete()

    def handle_tbls_insert(self, event: "WriteRowsEvent"):
        """Handle INSERT events on TBLS (a table was created).

        A create within `time_window` seconds after a delete of the same
        table is reported as a table overwrite.
        """
        rule = self.config.get_alert_rule('table_overwrite')
        if not rule.get('enabled', True):
            return

        time_window = rule.get('time_window', 300)
        now = time.time()

        for row in event.rows:
            values = row['values']
            db_id = values.get('DB_ID')
            table_name = values.get('TBL_NAME')

            db_name = self._get_db_name(db_id)
            if not db_name:
                continue

            full_table_name = f"{db_name}.{table_name}"

            # Overwrite = re-created within the window after a delete.
            delete_time = self.recent_deletes.get(full_table_name)
            if delete_time and (now - delete_time) <= time_window:
                alert = Alert(
                    alert_type=AlertType.TABLE_OVERWRITE,
                    level=rule.get('level', AlertLevel.SEVERE),
                    database=db_name,
                    table=table_name,
                    user=values.get('OWNER'),
                    details={
                        '删除时间': datetime.fromtimestamp(delete_time).strftime('%Y-%m-%d %H:%M:%S'),
                        '重建时间': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        '时间间隔': f"{int(now - delete_time)}秒"
                    }
                )

                alert_manager.send_alert(alert)
                logger.warning(f"检测到表覆盖: {full_table_name}")

        # Drop delete records that fell out of the detection window.
        self.recent_deletes = {k: v for k, v in self.recent_deletes.items()
                              if now - v <= time_window}

    def handle_dbs_delete(self, event: "DeleteRowsEvent"):
        """Handle DELETE events on DBS (a database was dropped)."""
        rule = self.config.get_alert_rule('drop_database')
        if not rule.get('enabled', True):
            return

        whitelist = rule.get('whitelist', [])

        for row in event.rows:
            values = row['values']
            db_name = values.get('NAME')
            db_id = values.get('DB_ID')

            # Whitelisted databases are expected to come and go.
            if self._match_pattern(db_name, whitelist):
                logger.info(f"数据库 {db_name} 在白名单中，跳过告警")
                continue

            alert = Alert(
                alert_type=AlertType.DROP_DATABASE,
                level=rule.get('level', AlertLevel.CRITICAL),
                database=db_name,
                details={
                    '数据库ID': db_id,
                    '操作时间': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                }
            )

            alert_manager.send_alert(alert)
            logger.critical(f"检测到删除数据库: {db_name}")

            # The id is gone from the metastore; drop it from the cache.
            self.db_id_cache.pop(db_id, None)

    def handle_table_params_update(self, event: "UpdateRowsEvent"):
        """Handle UPDATE events on TABLE_PARAMS (row-count fluctuation).

        Only the 'numRows' statistic is inspected; an alert fires when
        the relative change exceeds the configured threshold.
        """
        rule = self.config.get_alert_rule('data_fluctuation')
        if not rule.get('enabled', True):
            return

        partition_threshold = rule.get('partition_change_threshold', 0.3)
        row_threshold = rule.get('row_count_threshold', 0.5)

        for row in event.rows:
            before_values = row['before_values']
            after_values = row['after_values']

            table_id = after_values.get('TBL_ID')
            param_key = after_values.get('PARAM_KEY')

            # Only the row-count statistic matters here.
            if param_key != 'numRows':
                continue

            # PARAM_VALUE is a free-form string column; guard the
            # conversion so a non-numeric value cannot abort the whole
            # event (the old code raised ValueError here).
            try:
                old_value = int(before_values.get('PARAM_VALUE', 0))
                new_value = int(after_values.get('PARAM_VALUE', 0))
            except (TypeError, ValueError):
                continue

            # A zero baseline makes the relative change undefined.
            if old_value == 0:
                continue

            change_rate = abs(new_value - old_value) / old_value

            if change_rate < row_threshold:
                continue

            table_info = self._get_table_info(table_id)
            if not table_info:
                continue

            db_name, table_name = table_info

            # Escalate severity for very large swings (>= 70%).
            alert_level = AlertLevel.WARNING if change_rate < 0.7 else AlertLevel.SEVERE

            alert = Alert(
                alert_type=AlertType.DATA_FLUCTUATION,
                level=alert_level,
                database=db_name,
                table=table_name,
                details={
                    '行数变化率': f"{change_rate * 100:.1f}%",
                    '旧行数': f"{old_value:,}",
                    '新行数': f"{new_value:,}",
                    '变化量': f"{new_value - old_value:+,}"
                }
            )

            alert_manager.send_alert(alert)
            logger.info(f"检测到数据量波动: {db_name}.{table_name}, 变化率={change_rate:.1%}")

    def handle_partitions_event(self, event):
        """Handle PARTITIONS table events (partition churn).

        Partition add/drop analysis would need aggregation over time;
        for now we only log the raw counts.
        """
        if isinstance(event, DeleteRowsEvent):
            logger.debug(f"检测到分区删除: {len(event.rows)}个分区")
        elif isinstance(event, WriteRowsEvent):
            logger.debug(f"检测到分区创建: {len(event.rows)}个分区")

    def check_batch_delete(self):
        """Raise a batch-delete alert when too many tables are dropped
        within the configured time window."""
        rule = self.config.get_alert_rule('batch_delete')
        if not rule.get('enabled', True):
            return

        threshold = rule.get('threshold', 10)
        time_window = rule.get('time_window', 600)
        now = time.time()

        # Keep only drops that are still inside the window.
        self.recent_delete_list = [(t, ts) for t, ts in self.recent_delete_list
                                   if now - ts <= time_window]

        if len(self.recent_delete_list) >= threshold:
            # Summarize which databases were affected.
            databases = set()
            for full_table_name, _ in self.recent_delete_list:
                db_name = full_table_name.split('.', 1)[0]
                databases.add(db_name)

            alert = Alert(
                alert_type=AlertType.BATCH_DELETE,
                level=rule.get('level', AlertLevel.CRITICAL),
                database=",".join(databases),
                details={
                    '删除表数': len(self.recent_delete_list),
                    '时间窗口': f"{time_window}秒",
                    '涉及数据库': len(databases)
                }
            )

            alert_manager.send_alert(alert)
            logger.critical(f"检测到批量删除: {len(self.recent_delete_list)}张表")

            # Clear the window so the same burst is not reported twice.
            self.recent_delete_list.clear()

    def _match_pattern(self, name: str, patterns: list) -> bool:
        """Return True when `name` matches any fnmatch-style pattern."""
        return any(fnmatch.fnmatch(name, pattern) for pattern in patterns)

    def process_event(self, binlogevent):
        """Dispatch one binlog event to the matching handler.

        Errors are logged and swallowed so a single bad event cannot
        kill the monitoring loop.
        """
        try:
            table_name = binlogevent.table

            if table_name == 'TBLS':
                if isinstance(binlogevent, DeleteRowsEvent):
                    self.handle_tbls_delete(binlogevent)
                elif isinstance(binlogevent, WriteRowsEvent):
                    self.handle_tbls_insert(binlogevent)

            elif table_name == 'DBS':
                if isinstance(binlogevent, DeleteRowsEvent):
                    self.handle_dbs_delete(binlogevent)

            elif table_name == 'TABLE_PARAMS':
                if isinstance(binlogevent, UpdateRowsEvent):
                    self.handle_table_params_update(binlogevent)

            elif table_name == 'PARTITIONS':
                self.handle_partitions_event(binlogevent)

        except Exception as e:
            logger.error(f"处理Binlog事件异常: {e}", exc_info=True)

    def run(self):
        """Run the monitor loop forever, reconnecting with backoff on errors."""
        logger.info("Binlog监控服务启动")
        logger.info(f"监控数据库: {self.config.mysql_database}")
        logger.info(f"监控表: TBLS, DBS, TABLE_PARAMS, PARTITIONS")

        # Heartbeat file lets an external watchdog verify liveness.
        heartbeat_file = "/var/run/metadata-monitor/heartbeat"
        os.makedirs(os.path.dirname(heartbeat_file), exist_ok=True)

        retry_count = 0
        max_retries = 5

        while True:
            stream = None
            try:
                logger.info("正在连接到MySQL Binlog...")
                stream = self._get_binlog_stream()
                logger.info("已连接到MySQL Binlog，开始监控")

                retry_count = 0  # Connected successfully; reset backoff.

                for binlogevent in stream:
                    # Touch the heartbeat so the watchdog sees progress.
                    with open(heartbeat_file, 'w') as f:
                        f.write(str(time.time()))

                    self.process_event(binlogevent)

                    # Let the alert manager flush any batched alerts.
                    alert_manager.flush()

            except KeyboardInterrupt:
                logger.info("收到停止信号，退出监控")
                break

            except Exception as e:
                retry_count += 1
                logger.error(f"Binlog监控异常 (重试 {retry_count}/{max_retries}): {e}",
                           exc_info=True)

                # Compute the backoff BEFORE retry_count may be reset;
                # the old code reset first and then slept 0 seconds
                # after hitting the retry ceiling.
                delay = min(10 * retry_count, 60)

                if retry_count >= max_retries:
                    logger.critical("达到最大重试次数，发送告警")
                    try:
                        alert = Alert(
                            alert_type="monitor_error",
                            level=AlertLevel.CRITICAL,
                            database="system",
                            details={
                                '错误信息': str(e),
                                '重试次数': retry_count
                            }
                        )
                        alert_manager.send_alert(alert)
                    except Exception:
                        # Alerting is best-effort; never let it kill the loop.
                        pass

                    retry_count = 0

                # Back off before reconnecting.
                time.sleep(delay)

            finally:
                # Always release the replication connection before retrying
                # (the old code leaked it on every exception).
                if stream is not None:
                    try:
                        stream.close()
                    except Exception:
                        pass


def main():
    """Entry point: construct the monitor and block in its run loop."""
    BinlogMonitor().run()


if __name__ == '__main__':
    main()

