# -*- coding: utf-8 -*-
"""
Kafka Topic无新增监控 Provider。
"""
from __future__ import annotations

import json
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple

from app.blueprints.reports.providers.base import BaseProvider, SafeDict
from app.services.report_service import ReportTemplateService
from app.utils.logger import FlinkLogger

# Module-level logger, created lazily so importing this module has no side effects.
logger = None


def get_logger():
    """Return the module logger, creating it on first use (lazy singleton)."""
    global logger
    if logger is not None:
        return logger
    logger = FlinkLogger.get_logger(__name__)
    return logger


class TopicNoUpdateProvider(BaseProvider):
    """Data provider for the "Kafka topic has no new messages" monitoring report.

    Reads a SQL template and field labels from the report template's
    ``default_params`` (via ``ReportTemplateService``), renders the template
    with filter/sort clauses derived from ``self.params``, and runs the
    resulting query against the monitoring database.
    """

    # Staleness threshold keys (as stored in params) -> minimum hours without updates.
    _THRESHOLD_HOURS: Dict[str, int] = {
        "1_day": 24,
        "7_days": 168,
        "1_month": 720,
    }

    # Built-in Chinese labels used when the template does not override them.
    _DEFAULT_LABELS: Dict[str, str] = {
        "topic_name": "Topic名称",
        "partition_id": "分区ID",
        "last_message_time": "最后消息时间",
        "latest_offset": "最新Offset",
        "hours_no_update": "未更新时长(小时)",
        "status": "状态",
    }

    @staticmethod
    def _parse_default_params(tpl: Dict[str, Any]) -> Dict[str, Any]:
        """Return the template's default_params as a dict.

        Handles the value being stored either as a JSON string or a dict;
        falls back to an empty dict when it is missing/None so callers can
        safely call ``.get`` on the result.
        """
        default_params = tpl.get("default_params")
        if isinstance(default_params, str):
            default_params = json.loads(default_params)
        if not isinstance(default_params, dict):
            return {}
        return default_params

    def _get_sql_template(self) -> str:
        """Read the SQL template string from the template's default_params.

        Raises:
            ValueError: if the template code is empty, the template does not
                exist, or it carries no ``sql_template`` entry.
        """
        if not self.template_code:
            raise ValueError("模板编码不能为空")

        try:
            tpl = ReportTemplateService.get_template_by_code(self.template_code)
            if not tpl:
                raise ValueError(f"模板 {self.template_code} 不存在")

            sql_template = self._parse_default_params(tpl).get("sql_template", "")
            if not sql_template:
                raise ValueError("模板配置中缺少 sql_template")

            return sql_template
        except Exception as e:
            get_logger().error(f"读取 SQL 模板失败: {e}", exc_info=True)
            raise

    def _get_field_labels(self) -> Dict[str, str]:
        """Return display labels per field, merging template overrides over defaults.

        Lookup failures are deliberately swallowed so a broken template
        configuration degrades to the built-in labels instead of failing
        the whole report (best-effort by design).
        """
        labels = dict(self._DEFAULT_LABELS)

        if self.template_code:
            try:
                tpl = ReportTemplateService.get_template_by_code(self.template_code)
                if tpl:
                    template_labels = self._parse_default_params(tpl).get("field_labels", {})
                    if template_labels:
                        labels.update(template_labels)
            except Exception:
                # Best-effort: fall back to the default labels on any error.
                pass

        return labels

    def _build_config_filter(self, kafka_cluster_config: Dict[str, Any]) -> str:
        """Build the Kafka cluster filter clause (or '' when no cluster is set).

        Raises:
            ValueError/TypeError: if ``cluster_id`` is not coercible to int —
                the cast guards against SQL injection through the params dict.
        """
        cluster_id = kafka_cluster_config.get("cluster_id")
        if cluster_id:
            return f" AND config_id = {int(cluster_id)}"
        return ""

    def _build_topic_filter(self, topic_selection: Dict[str, Any]) -> str:
        """Build the topic-name IN(...) filter clause (or '' when unrestricted)."""
        topic_names = topic_selection.get("topic_names", [])
        if not topic_names:
            return ""
        # Double embedded single quotes so a topic name cannot break out of
        # the SQL string literal (standard SQL escaping).
        escaped = [str(name).replace("'", "''") for name in topic_names]
        return " AND topic_name IN ('{}')".format("', '".join(escaped))

    def _build_time_filter(self, time_thresholds: Dict[str, Any]) -> str:
        """Build the staleness filter clause; unknown threshold keys are ignored."""
        thresholds = time_thresholds.get("thresholds", [])
        conditions = [
            f"TIMESTAMPDIFF(HOUR, message_timestamp, NOW()) > {self._THRESHOLD_HOURS[key]}"
            for key in thresholds
            if key in self._THRESHOLD_HOURS
        ]
        if conditions:
            return " AND (" + " OR ".join(conditions) + ")"
        return ""

    def _build_sort_clause(self, time_thresholds: Dict[str, Any]) -> str:
        """Build the ORDER BY expression: by staleness (default) or by topic/partition."""
        if time_thresholds.get("sort_by_staleness", True):
            return "TIMESTAMPDIFF(HOUR, message_timestamp, NOW()) DESC"
        return "topic_name ASC, partition_id ASC"

    def _build_render_context(self) -> Dict[str, str]:
        """Assemble the placeholder context shared by collect() and preview_sql()."""
        kafka_cluster_config = self.params.get("kafka_cluster_config", {})
        topic_selection = self.params.get("topic_selection", {})
        time_thresholds = self.params.get("time_thresholds", {})
        field_labels = self._get_field_labels()

        return {
            "config_filter": self._build_config_filter(kafka_cluster_config),
            "topic_filter": self._build_topic_filter(topic_selection),
            "time_filter": self._build_time_filter(time_thresholds),
            "sort_clause": self._build_sort_clause(time_thresholds),
            "topic_name_label": field_labels.get("topic_name", "Topic名称"),
            "partition_id_label": field_labels.get("partition_id", "分区ID"),
            "last_message_time_label": field_labels.get("last_message_time", "最后消息时间"),
            "latest_offset_label": field_labels.get("latest_offset", "最新Offset"),
            "hours_no_update_label": field_labels.get("hours_no_update", "未更新时长(小时)"),
            "status_label": field_labels.get("status", "状态"),
        }

    def collect(self) -> Dict[str, Any]:
        """Run the rendered monitoring query and return its result table.

        Returns:
            ``{"query_result": {"columns": [...], "rows": [[...], ...]}}``,
            with columns/rows optionally narrowed by the ``display_fields`` param.

        Raises:
            Any exception from template loading or query execution (logged,
            then re-raised).
        """
        try:
            get_logger().info("[Kafka无新增] 开始收集数据")

            # TODO: read the flink_cluster_monitor data source id from configuration.
            data_source_id = 1  # hard-coded for now

            final_sql = self._get_sql_template().format_map(
                SafeDict(self._build_render_context())
            )

            get_logger().info("[Kafka无新增] 执行查询SQL")

            with self.get_data_source_connection(data_source_id) as conn:
                cursor = conn.cursor()
                try:
                    cursor.execute(final_sql)
                    columns = [desc[0] for desc in cursor.description]
                    rows = cursor.fetchall()
                finally:
                    # Release the cursor even if execute/fetch fails.
                    cursor.close()

                get_logger().info(f"[Kafka无新增] 查询完成，返回 {len(rows)} 条记录")
                rows_list = [list(row) for row in rows]

            # Optionally narrow columns according to the display_fields param.
            filtered_columns, filtered_rows = self._filter_display_fields(columns, rows_list)
            if filtered_columns is not None:
                columns = filtered_columns
                rows_list = filtered_rows

            return {
                "query_result": {
                    "columns": columns,
                    "rows": rows_list
                }
            }

        except Exception as exc:
            get_logger().error(f"[Kafka无新增] 收集数据失败: {exc}", exc_info=True)
            raise

    def _filter_display_fields(
        self, columns: List[str], rows: List[List[Any]]
    ) -> Tuple[Optional[List[str]], Optional[List[List[Any]]]]:
        """Project columns/rows down to the fields listed in params["display_fields"].

        Returns:
            ``(None, None)`` when no ``display_fields`` param is set (caller
            keeps the full result); ``([], [])`` when none of the requested
            fields match a result column; otherwise the filtered
            ``(columns, rows)``. Fields are matched to columns via their
            configured display labels.
        """
        display_fields = self.params.get("display_fields")
        if not display_fields:
            return None, None

        field_labels = self._get_field_labels()
        column_index_map = {col: idx for idx, col in enumerate(columns)}

        filtered_columns: List[str] = []
        selected_indexes: List[int] = []

        for field in display_fields:
            label = field_labels.get(field)
            if not label:
                continue  # unknown field name: no label configured
            column_idx = column_index_map.get(label)
            if column_idx is None:
                continue  # label not present in the query result
            filtered_columns.append(label)
            selected_indexes.append(column_idx)

        if not filtered_columns:
            return [], []

        # Pad with None for any row shorter than the widest selected index.
        filtered_rows = [
            [row[idx] if idx < len(row) else None for idx in selected_indexes]
            for row in rows
        ]

        return filtered_columns, filtered_rows

    def validate_params(self) -> List[str]:
        """Validate required params; return a list of error messages (empty = OK)."""
        errors: List[str] = []

        kafka_cluster_config = self.params.get("kafka_cluster_config")
        if not kafka_cluster_config:
            errors.append("缺少Kafka集群配置")
        elif not kafka_cluster_config.get("cluster_id"):
            errors.append("Kafka集群ID不能为空")

        if not self.params.get("time_thresholds"):
            errors.append("缺少时间阈值配置")

        return errors

    def preview_sql(self) -> str:
        """Render and return the final SQL without executing it (for preview)."""
        try:
            return self._get_sql_template().format_map(
                SafeDict(self._build_render_context())
            )
        except Exception as exc:
            get_logger().error(f"生成预览SQL失败: {exc}", exc_info=True)
            raise
