package bigdata.monitor.binlog.consumer;

import bigdata.monitor.binlog.entity.MonitorResult;
import bigdata.monitor.binlog.entity.MonitorRule;
import bigdata.monitor.binlog.service.RuleService;
import bigdata.monitor.binlog.utils.CanalToDebeziumConverter;
import bigdata.monitor.binlog.utils.JsonPathFieldExtractor;
import bigdata.monitor.binlog.utils.JsonUtils;
import bigdata.monitor.binlog.utils.RuleEvaluator;
import com.alibaba.fastjson2.JSON;
import com.alibaba.fastjson2.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.UUID;

/**
 * Binlog message consumer.
 * - Reads Flink CDC binlog JSON (containing table/op/before/after) from Kafka.
 * - Validates each incoming message against the cached monitoring rules.
 * - For every rule that fails (i.e. the data violates it), builds a violation result.
 * - Serializes each violation result to JSON and publishes it to the Kafka
 *   violation-event topic (e.g. monitor-violation-events).
 *
 * Architecture notes:
 * - The "batched SQL expression" approach (CombinedRuleSqlBuilder) is no longer used.
 * - Rules are evaluated independently, routed automatically by UnifiedRuleEngine:
 *     - basic rules (TYPE/ENUM/LENGTH/PATTERN) -> json-schema-validator
 *     - custom rules (CUSTOM)                  -> in-memory SQLite expression evaluation
 */
@Slf4j
//@Component  NOTE(review): bean registration is commented out — confirm this consumer is intentionally disabled
public class BinlogConsumerByKafka {

    @Autowired
    private RuleService ruleService;

    @Autowired
    private RuleEvaluator ruleEvaluator;

    @Autowired
    private JsonPathFieldExtractor fieldExtractor;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    // NOTE(review): unused — the @KafkaListener below reads the property via SpEL directly.
    @Value("${monitor.initial-topics}")
    private String initialTopics;

    /** Target topic for violation events; defaults to "monitor-violation-events". */
    @Value("${monitor.violation-topic:monitor-violation-events}")
    private String violationTopic;

    /**
     * Consumes one binlog message, evaluates every matching rule and publishes
     * a violation event for each rule that fails.
     *
     * Offset handling: the offset is always acknowledged in the {@code finally}
     * block, including on processing errors — a failed message is therefore
     * dropped, not redelivered (at-most-once on the failure path). If retry
     * semantics are required, rethrow instead of swallowing the exception.
     *
     * @param binlogJsonMessage raw Canal-format binlog JSON from Kafka
     * @param ack               manual offset acknowledgment
     */
    @KafkaListener(topics = "#{'${monitor.initial-topics}'.split(',')}", groupId = "binlog-monitor-group")
    public void consume(String binlogJsonMessage, Acknowledgment ack) {
        try {
            // Convert Canal JSON to Debezium layout so db/table can be read from "source".
            String debeziumJson = CanalToDebeziumConverter.convert(binlogJsonMessage);
            JSONObject obj = JSON.parseObject(debeziumJson);
            JSONObject source = obj.getJSONObject("source");
            String databaseName = source.getString("db");
            String tableName = source.getString("table");
            String key = databaseName + "." + tableName;

            // Null-guard: the rule cache may have no entry at all for this table.
            List<MonitorRule> rules = ruleService.getSortedRulesByKey(key);
            if (rules == null || rules.isEmpty()) {
                return; // nothing to check; the finally block commits the offset exactly once
            }

            // 1. Evaluate all rules against this message in one pass; collect the failures.
            //    NOTE(review): evaluation runs on the original Canal JSON, not the converted
            //    Debezium JSON — confirm this is what RuleEvaluator expects.
            List<MonitorRule> failedRules = ruleEvaluator.evaluateRules(binlogJsonMessage, rules);

            // 2. Publish one violation event per failed rule.
            for (MonitorRule rule : failedRules) {
                // Extract the configured output fields together with their before/after values.
                String changedFields = fieldExtractor.extractChangedFields(binlogJsonMessage, rule.getOutputFields());

                MonitorResult result = new MonitorResult();
                result.setTableName(tableName);
                result.setPrimaryKey(JsonUtils.getPrimaryKey(binlogJsonMessage, rule.getPrimaryField()));
                result.setBatchId(UUID.randomUUID().toString());
                result.setRuleName(rule.getRuleName());
                result.setRuleCode(rule.getRuleCode());
                result.setChangedFields(changedFields);

                // Serialize and send to the violation-event topic.
                // NOTE(review): send() is asynchronous and its result is ignored here,
                // so broker-side failures are not surfaced — confirm this is acceptable.
                String violationJson = JSON.toJSONString(result);
                kafkaTemplate.send(violationTopic, violationJson);

                log.info("Rule violated: {} ({}) for pk={}", rule.getRuleName(), rule.getRuleCode(), result.getPrimaryKey());
            }
        } catch (Exception e) {
            // Swallowed deliberately so one bad message cannot block the partition;
            // the acknowledge below still advances the offset, so the message is NOT retried.
            log.error("Error processing message", e);
        } finally {
            ack.acknowledge(); // manual offset commit (success, no-rules and error paths alike)
        }
    }
}
