# -*- coding: utf-8 -*-
"""
Hive元数据库监控脚本
通过定期查询元数据库，对比快照数据，识别删表、删库、数据量波动等操作
"""

import fnmatch
import logging
import os
import sys
import time
from datetime import datetime
from typing import Any, Dict, List, Optional, Set

import pymysql
from dbutils.pooled_db import PooledDB
from pymysql.cursors import DictCursor

# Make the project root importable so the `common.*` modules below resolve
# when this script is executed directly (not as part of a package).
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from common.config import config
from common.alert_manager import alert_manager, Alert, AlertLevel, AlertType

# Logging setup: level and log-file path come from the shared project config;
# records go both to that file and to the console.
logging.basicConfig(
    level=getattr(logging, config.log_level),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(config.log_file),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class MetadataSnapshot:
    """Point-in-time view of the Hive metastore.

    Holds the set of databases, the set of fully-qualified tables
    ("db.table"), optional per-table detail dicts, and per-database table
    counts, plus the collection timestamp. Two snapshots can be diffed to
    find dropped/new objects and data-volume changes.
    """

    def __init__(self):
        # Database names: {db_name}
        self.databases: Set[str] = set()

        # Fully-qualified table names: {"db_name.table_name"}
        self.tables: Set[str] = set()

        # Per-table details: {"db.table": {partition_count, row_count, ...}}
        self.table_details: Dict[str, Dict[str, Any]] = {}

        # Number of tables per database: {db_name: count}
        self.db_table_counts: Dict[str, int] = {}

        # When this snapshot was collected.
        self.timestamp = datetime.now()

    def add_database(self, db_name: str):
        """Record a database in the snapshot."""
        self.databases.add(db_name)

    def add_table(self, db_name: str, table_name: str,
                  details: Optional[Dict[str, Any]] = None):
        """Record a table (and optionally its details) in the snapshot.

        Re-adding an already-recorded table refreshes its details but does
        not inflate the per-database table count.
        """
        full_name = f"{db_name}.{table_name}"

        # Only bump the database counter for genuinely new tables so that
        # duplicate add_table() calls cannot skew db_table_counts.
        if full_name not in self.tables:
            self.tables.add(full_name)
            self.db_table_counts[db_name] = self.db_table_counts.get(db_name, 0) + 1

        if details:
            self.table_details[full_name] = details

    def get_deleted_databases(self, new_snapshot: 'MetadataSnapshot') -> Set[str]:
        """Return databases present here but missing from *new_snapshot*."""
        return self.databases - new_snapshot.databases

    def get_deleted_tables(self, new_snapshot: 'MetadataSnapshot') -> Set[str]:
        """Return tables present here but missing from *new_snapshot*."""
        return self.tables - new_snapshot.tables

    def get_new_tables(self, new_snapshot: 'MetadataSnapshot') -> Set[str]:
        """Return tables present in *new_snapshot* but not here."""
        return new_snapshot.tables - self.tables

    def compare_data_volume(self, new_snapshot: 'MetadataSnapshot',
                          table_name: str) -> Optional[Dict[str, Any]]:
        """Compare a table's data volume between this and a newer snapshot.

        Args:
            new_snapshot: the later snapshot to compare against.
            table_name: fully-qualified "db.table" name.

        Returns:
            Dict with 'partition_change' and/or 'row_change' entries (each
            holding old/new values and a relative change rate), or None when
            the table lacks details in either snapshot or nothing moved by
            more than 1%.
        """
        if table_name not in self.table_details or table_name not in new_snapshot.table_details:
            return None

        old_details = self.table_details[table_name]
        new_details = new_snapshot.table_details[table_name]

        changes = {}

        # Partition-count delta. A zero baseline yields no meaningful
        # relative rate, so such tables are skipped.
        old_partitions = old_details.get('partition_count', 0)
        new_partitions = new_details.get('partition_count', 0)

        if old_partitions > 0:
            partition_change_rate = abs(new_partitions - old_partitions) / old_partitions
            if partition_change_rate > 0.01:  # ignore noise below 1%
                changes['partition_change'] = {
                    'old': old_partitions,
                    'new': new_partitions,
                    'rate': partition_change_rate
                }

        # Row-count delta (only available when the metastore has stats).
        old_rows = old_details.get('row_count', 0)
        new_rows = new_details.get('row_count', 0)

        if old_rows > 0:
            row_change_rate = abs(new_rows - old_rows) / old_rows
            if row_change_rate > 0.01:
                changes['row_change'] = {
                    'old': old_rows,
                    'new': new_rows,
                    'rate': row_change_rate
                }

        return changes if changes else None

class MetadataMonitor:
    """Hive metastore monitor.

    Periodically snapshots the metastore (a MySQL database) and diffs
    consecutive snapshots to detect dropped databases/tables, table
    overwrites (drop-then-recreate), batch deletions and data-volume
    fluctuations, emitting alerts through the shared ``alert_manager``.
    """

    def __init__(self):
        self.config = config
        self.db_pool = self._create_db_pool()
        # Snapshot taken on the previous cycle; None until the first run.
        self.last_snapshot: Optional["MetadataSnapshot"] = None

        # Overwrite detection: {full_table_name: deletion time (epoch seconds)}
        self.recent_deletes: Dict[str, float] = {}

        # Batch-delete detection: [(full_table_name, deletion time)]
        self.recent_delete_list: List[tuple] = []

    def _create_db_pool(self) -> "PooledDB":
        """Build the pooled MySQL connection factory for the metastore.

        Returns:
            A PooledDB whose connections yield DictCursor rows.
        """
        return PooledDB(
            creator=pymysql,
            maxconnections=self.config.mysql_pool_size,
            host=self.config.mysql_host,
            port=self.config.mysql_port,
            user=self.config.mysql_user,
            password=self.config.mysql_password,
            database=self.config.mysql_database,
            charset='utf8mb4',
            cursorclass=DictCursor
        )

    def _get_connection(self):
        """Borrow a connection from the pool (close() returns it to the pool)."""
        return self.db_pool.connection()

    @staticmethod
    def _to_int(value, default: int = 0) -> int:
        """Coerce a metastore PARAM_VALUE (usually a string) to int.

        Malformed or missing values fall back to *default* instead of
        aborting the whole snapshot collection.
        """
        try:
            return int(value)
        except (TypeError, ValueError):
            return default

    def collect_snapshot(self) -> "MetadataSnapshot":
        """Collect a full metadata snapshot from the metastore.

        Partition counts and statistics params are fetched with one bulk
        query each (GROUP BY / IN-filter) instead of two extra queries per
        table, eliminating the original N+1 query pattern on large
        metastores.

        Returns:
            A populated MetadataSnapshot.
        """
        snapshot = MetadataSnapshot()

        conn = self._get_connection()
        try:
            with conn.cursor() as cursor:
                # 1. All user databases (system schemas excluded).
                cursor.execute("""
                    SELECT NAME as db_name 
                    FROM DBS 
                    WHERE NAME NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys')
                """)

                for row in cursor.fetchall():
                    snapshot.add_database(row['db_name'])

                # 2. All tables with their basic attributes.
                cursor.execute("""
                    SELECT 
                        d.NAME as db_name,
                        t.TBL_NAME as table_name,
                        t.TBL_ID as table_id,
                        t.CREATE_TIME as create_time,
                        t.OWNER as owner
                    FROM TBLS t
                    JOIN DBS d ON t.DB_ID = d.DB_ID
                    WHERE d.NAME NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys')
                """)

                tables = cursor.fetchall()

                # 3. Partition counts for every table in a single pass.
                cursor.execute("""
                    SELECT TBL_ID, COUNT(*) as partition_count
                    FROM PARTITIONS
                    GROUP BY TBL_ID
                """)
                partition_counts = {
                    r['TBL_ID']: r['partition_count'] for r in cursor.fetchall()
                }

                # 4. Statistics params (row count / size / file count) in bulk.
                cursor.execute("""
                    SELECT TBL_ID, PARAM_KEY, PARAM_VALUE
                    FROM TABLE_PARAMS
                    WHERE PARAM_KEY IN ('numRows', 'totalSize', 'numFiles')
                """)
                table_params: Dict[Any, Dict[str, Any]] = {}
                for p in cursor.fetchall():
                    table_params.setdefault(p['TBL_ID'], {})[p['PARAM_KEY']] = p['PARAM_VALUE']

                for row in tables:
                    table_id = row['table_id']
                    params = table_params.get(table_id, {})

                    details = {
                        'table_id': table_id,
                        'partition_count': partition_counts.get(table_id, 0),
                        'row_count': self._to_int(params.get('numRows', 0)),
                        'total_size': self._to_int(params.get('totalSize', 0)),
                        'num_files': self._to_int(params.get('numFiles', 0)),
                        'create_time': row['create_time'],
                        'owner': row['owner']
                    }

                    snapshot.add_table(row['db_name'], row['table_name'], details)

            logger.info(f"采集快照完成: {len(snapshot.databases)}个数据库, "
                       f"{len(snapshot.tables)}张表")

        finally:
            conn.close()

        return snapshot

    def check_drop_database(self, deleted_dbs: Set[str]):
        """Alert on dropped databases (unless whitelisted or rule disabled)."""
        rule = self.config.get_alert_rule('drop_database')
        if not rule.get('enabled', True):
            return

        whitelist = rule.get('whitelist', [])

        for db_name in deleted_dbs:
            # Whitelisted databases are expected to disappear; skip them.
            if self._match_pattern(db_name, whitelist):
                logger.info(f"数据库 {db_name} 在白名单中，跳过告警")
                continue

            alert = Alert(
                alert_type=AlertType.DROP_DATABASE,
                level=rule.get('level', AlertLevel.CRITICAL),
                database=db_name,
                details={
                    '表数量': self.last_snapshot.db_table_counts.get(db_name, 0)
                }
            )

            alert_manager.send_alert(alert)
            logger.warning(f"检测到删除数据库: {db_name}")

    def check_drop_table(self, deleted_tables: Set[str]):
        """Alert on dropped tables and record deletions for later checks."""
        rule = self.config.get_alert_rule('drop_table')
        if not rule.get('enabled', True):
            return

        whitelist = rule.get('whitelist', [])
        blacklist = rule.get('blacklist', [])

        now = time.time()

        for full_table_name in deleted_tables:
            db_name, table_name = full_table_name.split('.', 1)

            # Record the deletion for overwrite / batch-delete detection.
            # NOTE(review): whitelisted tables are recorded too and therefore
            # count toward the batch-delete threshold — confirm intended.
            self.recent_deletes[full_table_name] = now
            self.recent_delete_list.append((full_table_name, now))

            # Blacklist entries override the whitelist.
            in_whitelist = self._match_pattern(full_table_name, whitelist)
            in_blacklist = self._match_pattern(full_table_name, blacklist)

            if in_whitelist and not in_blacklist:
                logger.info(f"表 {full_table_name} 在白名单中，跳过告警")
                continue

            # Enrich the alert with details captured in the old snapshot.
            table_details = self.last_snapshot.table_details.get(full_table_name, {})

            alert = Alert(
                alert_type=AlertType.DROP_TABLE,
                level=rule.get('level', AlertLevel.SEVERE),
                database=db_name,
                table=table_name,
                user=table_details.get('owner'),
                details={
                    '分区数': table_details.get('partition_count', 0),
                    '行数': table_details.get('row_count', 0),
                    '大小': self._format_size(table_details.get('total_size', 0)),
                    '创建时间': self._format_timestamp(table_details.get('create_time'))
                }
            )

            alert_manager.send_alert(alert)
            logger.warning(f"检测到删除表: {full_table_name}")

    def check_table_overwrite(self, new_tables: Set[str]):
        """Alert on tables recreated shortly after deletion (overwrite)."""
        rule = self.config.get_alert_rule('table_overwrite')
        if not rule.get('enabled', True):
            return

        time_window = rule.get('time_window', 300)  # seconds, default 5 min
        now = time.time()

        for full_table_name in new_tables:
            # Was this "new" table deleted within the time window?
            delete_time = self.recent_deletes.get(full_table_name)
            if delete_time and (now - delete_time) <= time_window:
                db_name, table_name = full_table_name.split('.', 1)

                alert = Alert(
                    alert_type=AlertType.TABLE_OVERWRITE,
                    level=rule.get('level', AlertLevel.SEVERE),
                    database=db_name,
                    table=table_name,
                    details={
                        '删除时间': self._format_timestamp(delete_time),
                        '重建时间': self._format_timestamp(now),
                        '时间间隔': f"{int(now - delete_time)}秒"
                    }
                )

                alert_manager.send_alert(alert)
                logger.warning(f"检测到表覆盖: {full_table_name}")

        # Drop deletion records that fell outside the window.
        self.recent_deletes = {k: v for k, v in self.recent_deletes.items()
                              if now - v <= time_window}

    def check_batch_delete(self):
        """Alert when deletions inside the time window reach the threshold."""
        rule = self.config.get_alert_rule('batch_delete')
        if not rule.get('enabled', True):
            return

        threshold = rule.get('threshold', 10)
        time_window = rule.get('time_window', 600)  # seconds, default 10 min
        now = time.time()

        # Keep only deletions that are still inside the window.
        self.recent_delete_list = [(t, ts) for t, ts in self.recent_delete_list
                                   if now - ts <= time_window]

        if len(self.recent_delete_list) >= threshold:
            # Collect the set of affected databases.
            databases = {name.split('.', 1)[0] for name, _ in self.recent_delete_list}

            alert = Alert(
                alert_type=AlertType.BATCH_DELETE,
                level=rule.get('level', AlertLevel.CRITICAL),
                database=",".join(sorted(databases)),  # sorted for stable alert text
                details={
                    '删除表数': len(self.recent_delete_list),
                    '时间窗口': f"{time_window}秒",
                    '涉及数据库': len(databases)
                }
            )

            alert_manager.send_alert(alert)
            logger.critical(f"检测到批量删除: {len(self.recent_delete_list)}张表")

            # Reset so the same burst is not reported twice.
            self.recent_delete_list.clear()

    def check_data_fluctuation(self, current_snapshot: "MetadataSnapshot"):
        """Alert on large partition/row-count swings between snapshots.

        Only tables present in both snapshots are considered; callers must
        ensure ``self.last_snapshot`` is set.
        """
        rule = self.config.get_alert_rule('data_fluctuation')
        if not rule.get('enabled', True):
            return

        partition_threshold = rule.get('partition_change_threshold', 0.3)
        row_threshold = rule.get('row_count_threshold', 0.5)

        common_tables = self.last_snapshot.tables & current_snapshot.tables

        for full_table_name in common_tables:
            changes = self.last_snapshot.compare_data_volume(current_snapshot, full_table_name)

            if not changes:
                continue

            should_alert = False
            alert_level = AlertLevel.INFO
            alert_details = {}

            # Partition swing: escalate when at least half the partitions moved.
            if 'partition_change' in changes:
                change_rate = changes['partition_change']['rate']
                if change_rate >= partition_threshold:
                    should_alert = True
                    alert_level = AlertLevel.WARNING if change_rate < 0.5 else AlertLevel.SEVERE
                    alert_details['分区变化率'] = f"{change_rate * 100:.1f}%"
                    alert_details['旧分区数'] = changes['partition_change']['old']
                    alert_details['新分区数'] = changes['partition_change']['new']

            # Row-count swing: may override the level set above.
            if 'row_change' in changes:
                change_rate = changes['row_change']['rate']
                if change_rate >= row_threshold:
                    should_alert = True
                    alert_level = AlertLevel.WARNING if change_rate < 0.7 else AlertLevel.SEVERE
                    alert_details['行数变化率'] = f"{change_rate * 100:.1f}%"
                    alert_details['旧行数'] = f"{changes['row_change']['old']:,}"
                    alert_details['新行数'] = f"{changes['row_change']['new']:,}"

            if should_alert:
                db_name, table_name = full_table_name.split('.', 1)

                alert = Alert(
                    alert_type=AlertType.DATA_FLUCTUATION,
                    level=alert_level,
                    database=db_name,
                    table=table_name,
                    details=alert_details
                )

                alert_manager.send_alert(alert)
                logger.info(f"检测到数据量波动: {full_table_name}, {alert_details}")

    def _match_pattern(self, name: str, patterns: List[str]) -> bool:
        """Return True if *name* matches any shell-style pattern (* and ?)."""
        return any(fnmatch.fnmatch(name, pattern) for pattern in patterns)

    def _format_size(self, size_bytes: int) -> str:
        """Format a byte count as a human-readable string (B .. PB)."""
        # Work on a float copy rather than mutating the int parameter.
        size = float(size_bytes)
        for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
            if size < 1024.0:
                return f"{size:.2f} {unit}"
            size /= 1024.0
        return f"{size:.2f} PB"

    def _format_timestamp(self, timestamp) -> str:
        """Format an epoch timestamp (int/float/str) as 'YYYY-mm-dd HH:MM:SS'.

        Returns "未知" for None, and the raw value for unparseable input so
        a bogus CREATE_TIME cannot break alert formatting.
        """
        if timestamp is None:
            return "未知"

        try:
            if isinstance(timestamp, (int, float)):
                dt = datetime.fromtimestamp(timestamp)
            else:
                dt = datetime.fromtimestamp(int(timestamp))
        except (TypeError, ValueError, OverflowError, OSError):
            return str(timestamp)

        return dt.strftime('%Y-%m-%d %H:%M:%S')

    def run_once(self):
        """Run one monitoring cycle: snapshot, diff, alert, flush."""
        try:
            current_snapshot = self.collect_snapshot()

            # Diff only when a baseline exists (not on the first cycle).
            if self.last_snapshot:
                # Dropped databases
                deleted_dbs = self.last_snapshot.get_deleted_databases(current_snapshot)
                if deleted_dbs:
                    self.check_drop_database(deleted_dbs)

                # Dropped tables (also feeds batch-delete detection)
                deleted_tables = self.last_snapshot.get_deleted_tables(current_snapshot)
                if deleted_tables:
                    self.check_drop_table(deleted_tables)
                    self.check_batch_delete()

                # Drop-then-recreate (overwrite) detection
                new_tables = self.last_snapshot.get_new_tables(current_snapshot)
                if new_tables:
                    self.check_table_overwrite(new_tables)

                # Data-volume fluctuation
                self.check_data_fluctuation(current_snapshot)

            # The current snapshot becomes the next cycle's baseline.
            self.last_snapshot = current_snapshot

            alert_manager.flush()

        except Exception as e:
            logger.error(f"监控执行异常: {e}", exc_info=True)

            # Best effort: report the monitor's own failure as an alert.
            try:
                alert = Alert(
                    # NOTE(review): plain string here vs AlertType enum
                    # elsewhere — confirm alert consumers accept both.
                    alert_type="monitor_error",
                    level=AlertLevel.CRITICAL,
                    database="system",
                    details={'错误信息': str(e)}
                )
                alert_manager.send_alert(alert)
            except Exception:
                # Never let alerting about a failure crash the monitor,
                # but leave a trace instead of silently swallowing.
                logger.exception("发送监控异常告警失败")

    def run(self):
        """Run the monitoring loop forever, touching a heartbeat file each cycle."""
        logger.info("元数据监控服务启动")
        logger.info(f"监控间隔: {self.config.monitor_interval}秒")

        # Heartbeat file lets an external watchdog detect a stalled monitor.
        heartbeat_file = "/var/run/metadata-monitor/heartbeat"
        os.makedirs(os.path.dirname(heartbeat_file), exist_ok=True)

        while True:
            try:
                # Touch the heartbeat before doing any work.
                with open(heartbeat_file, 'w') as f:
                    f.write(str(time.time()))

                self.run_once()

                time.sleep(self.config.monitor_interval)

            except KeyboardInterrupt:
                logger.info("收到停止信号，退出监控")
                break
            except Exception as e:
                logger.error(f"监控循环异常: {e}", exc_info=True)
                time.sleep(10)  # back off before retrying after an unexpected error


def main():
    """Entry point: instantiate the monitor and start the polling loop."""
    MetadataMonitor().run()


if __name__ == '__main__':
    main()

