package com.ververica.cdc.connectors.oracle.debezium;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.debezium.config.Configuration;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Embedded Debezium pipeline that captures row-level changes from a source
 * Oracle instance and replays them into a target Oracle instance in batches.
 *
 * <p>Events are buffered and applied in transactions of {@link #BATCH_SIZE};
 * a trailing partial batch is flushed from the JVM shutdown hook.
 */
public class DebeziumEmbeddedOracleToOracle {

    // Target database connection settings.
    // NOTE(review): credentials are hard-coded; externalize (env vars /
    // properties file) before any production use.
    public static String targetDbUrl = "jdbc:oracle:thin:@192.168.25.208:1521/oracle11g.us.oracle.com";
    public static String user = "datalink_target";
    public static String password = "test@123";

    // Pending change events; flushed whenever BATCH_SIZE is reached, and once
    // more from the shutdown hook so a trailing partial batch is not dropped.
    private static final List<ChangeEvent<String, String>> buffer = new ArrayList<>();
    private static final int BATCH_SIZE = 100;
    private static final Logger logger = LoggerFactory.getLogger(DebeziumEmbeddedOracleToOracle.class);

    // ObjectMapper is thread-safe and relatively expensive to build; share one
    // instance instead of allocating a new one per batch.
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Connection pool for the target database.
    private static final HikariDataSource dataSource;

    static {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl(targetDbUrl);
        config.setUsername(user);
        config.setPassword(password);
        config.setMaximumPoolSize(10);
        config.setMinimumIdle(2);
        config.setConnectionTimeout(30000); // 30 s to obtain a connection
        config.setIdleTimeout(600000);      // 10 min idle eviction
        config.setMaxLifetime(1800000);     // 30 min max connection lifetime
        dataSource = new HikariDataSource(config);
    }

    public static void main(String[] args) {
        // Source database Debezium configuration.
        Configuration config = Configuration.create()
                .with("name", "oracle-embedded-connector")
                .with("connector.class", "io.debezium.connector.oracle.OracleConnector")
                .with("tasks.max", "1")
                .with("database.hostname", "192.168.25.208")
                .with("database.port", "1521")
                .with("database.user", "datalink_source")
                .with("database.password", "test@123")
                .with("database.dbname", "oracle11g.us.oracle.com")
                // Name of the Oracle Pluggable Database (PDB) in a multitenant
                // (CDB) setup; not needed for this non-CDB 11g instance.
                //.with("database.pdb.name", "ORCLPDB1")
                // Logical name identifying the source; used as the topic prefix in
                // Kafka deployments, and as a unique source identifier otherwise.
                .with("database.service.name", "oracle11g.us.oracle.com")
                // Oracle schemas to capture changes from.
                .with("database.include.list", "DATALINK_SOURCE")
                // Tables to capture, as schema.table (comma-separated for multiple).
                .with("table.include.list", "DATALINK_SOURCE.DL_TEST_SOURCE1")
                .with("database.history", "io.debezium.relational.history.FileDatabaseHistory")
                .with("database.history.file.filename", "/Users/linzhihao/IdeaProjects/flink-cdc/flink-cdc-connect/flink-cdc-source-connectors/flink-connector-oracle-cdc/src/test/resources/historyfile/dbhistory.txt") // file storing database schema history
                .with("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore")
                .with("offset.storage.file.filename", "/Users/linzhihao/IdeaProjects/flink-cdc/flink-cdc-connect/flink-cdc-source-connectors/flink-connector-oracle-cdc/src/test/resources/historyfile/offsets.txt") // file storing connector offsets
                .with("database.preferred.username", "debezium")
                .with("database.log.mining.strategy", "ONLINE") // LogMiner strategy
                .with("snapshot.mode", "initial") // take an initial snapshot
                .with("tombstones.on.delete", "false") // no tombstone records on delete
                .with("heartbeat.interval.ms", "30000") // heartbeat interval
                .with("poll.interval.ms", "1000") // poll interval
                .with("max.batch.size", "2048") // max batch size
                .with("key.converter", "org.apache.kafka.connect.json.JsonConverter") // key converter
                .with("value.converter", "org.apache.kafka.connect.json.JsonConverter") // value converter
                .with("key.converter.schemas.enable", "true") // wrap keys in a schema envelope
                .with("value.converter.schemas.enable", "true") // wrap values in a schema envelope
                .with("schemas.cache.size", "1000") // schema cache size
                .with("logging.level.io.debezium", "DEBUG")
                .with("logging.level.org.apache.kafka.connect", "DEBUG")
                .build();

        // Build the embedded engine emitting JSON-serialized change events.
        DebeziumEngine<ChangeEvent<String, String>> engine = DebeziumEngine.create(Json.class)
                .using(config.asProperties())
                .notifying(DebeziumEmbeddedOracleToOracle::handleChangeEvent)
                .build();

        // Run the engine on its own thread.
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(engine);

        // Shutdown hook: stop the engine first so no new events arrive, flush
        // the partial batch, then release executor and pool resources.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            try {
                engine.close();
                executor.shutdown();
                // BUGFIX: events buffered below BATCH_SIZE were previously lost
                // on shutdown; flush them before closing the pool.
                flushRemaining();
                dataSource.close();
                logger.info("Debezium引擎已关闭。");
            } catch (Exception e) {
                logger.error("关闭 Debezium 引擎时出错", e);
            }
        }));

        logger.info("Debezium Oracle Sync 应用已启动。");
    }

    /**
     * Buffers one change event and triggers a batch sync when the buffer is
     * full. Synchronized because the Debezium engine thread and the shutdown
     * hook both access {@link #buffer}.
     */
    private static synchronized void handleChangeEvent(ChangeEvent<String, String> changeEvent) {
        buffer.add(changeEvent);
        if (buffer.size() >= BATCH_SIZE) {
            syncBatchToTargetDatabase(new ArrayList<>(buffer));
            buffer.clear();
        }
    }

    /** Flushes any buffered events that never reached a full batch (shutdown path). */
    private static synchronized void flushRemaining() {
        if (!buffer.isEmpty()) {
            syncBatchToTargetDatabase(new ArrayList<>(buffer));
            buffer.clear();
        }
    }

    /**
     * Applies a batch of change events to the target database inside a single
     * transaction. Individual malformed events are logged and skipped
     * (best-effort); a failure at commit time rolls back the whole batch.
     */
    private static void syncBatchToTargetDatabase(List<ChangeEvent<String, String>> events) {
        logger.info("开始同步 {} 条变更数据到目标数据库。", events.size());

        try (Connection targetConn = dataSource.getConnection()) {
            targetConn.setAutoCommit(false);
            try {
                int index = 0;
                for (ChangeEvent<String, String> event : events) {
                    String payload = event.value();
                    try {
                        JsonNode node = MAPPER.readTree(payload);
                        // With value.converter.schemas.enable=true the event is
                        // wrapped in a {"schema":..., "payload":...} envelope;
                        // unwrap it so "op"/"source" lookups below succeed.
                        if (node.has("payload")) {
                            node = node.get("payload");
                        }
                        String table = node.get("source").get("table").asText();

                        if (node.get("op") != null) {
                            String operation = node.get("op").asText();
                            JsonNode after = node.get("after");   // row image for insert/update
                            JsonNode before = node.get("before"); // row image for update/delete

                            if ("c".equals(operation) || "u".equals(operation)) {
                                applyUpsert(targetConn, table, after);
                            } else if ("d".equals(operation)) {
                                applyDelete(targetConn, table, before);
                            } else {
                                logger.warn("未知的操作类型: {}", operation);
                            }
                            index++;
                        }
                    } catch (Exception e) {
                        // Best-effort per event: log and continue with the batch.
                        logger.error("同步单条数据时出错: {}", payload, e);
                    }
                }

                targetConn.commit();
                logger.info("成功同步 {} 条变更数据到目标数据库。", index);
            } catch (Exception e) {
                // BUGFIX: the original code obtained a NEW pooled connection and
                // called rollback() on it, which cannot undo work performed on
                // targetConn. Roll back on the connection that ran the statements.
                logger.error("同步批量数据时出错。尝试回滚。", e);
                try {
                    targetConn.rollback();
                } catch (SQLException rollbackEx) {
                    logger.error("回滚事务时出错。", rollbackEx);
                }
            }
        } catch (SQLException e) {
            // Failed to obtain/close the connection itself; nothing to roll back.
            logger.error("同步批量数据时出错。尝试回滚。", e);
        }
    }

    /**
     * Inserts or updates one row via MERGE using the event's "after" image.
     * The table name originates from Debezium source metadata (not user
     * input); the column list is fixed for DL_TEST_SOURCE1.
     */
    private static void applyUpsert(Connection conn, String table, JsonNode data) throws SQLException {
        String mergeSql = String.format(
                "MERGE INTO %s t USING (SELECT ? AS id FROM dual) s " +
                        "ON (t.id = s.id) " +
                        "WHEN MATCHED THEN UPDATE SET t.name = ?, t.age = ?, t.create_time = ?, t.update_time = ? " +
                        "WHEN NOT MATCHED THEN INSERT (id, name, age, create_time, update_time) " +
                        "VALUES (?, ?, ?, ?, ?)", table);

        try (PreparedStatement pstmt = conn.prepareStatement(mergeSql)) {
            long id = data.get("id").asLong();
            String name = data.get("name").asText();
            String age = data.get("age").asText();
            // NOTE(review): Date.valueOf expects "yyyy-MM-dd"; Debezium commonly
            // encodes DATE/TIMESTAMP columns as epoch-based integers — confirm
            // the wire format against an actual event before relying on this.
            Date createTime = Date.valueOf(data.get("create_time").asText());
            Date updateTime = Date.valueOf(data.get("update_time").asText());

            pstmt.setLong(1, id);
            pstmt.setString(2, name);
            pstmt.setString(3, age);
            pstmt.setDate(4, createTime);
            pstmt.setDate(5, updateTime);
            pstmt.setLong(6, id);
            pstmt.setString(7, name);
            pstmt.setString(8, age);
            pstmt.setDate(9, createTime);
            pstmt.setDate(10, updateTime);

            pstmt.executeUpdate();
        }
    }

    /** Deletes one row by primary key using the event's "before" image. */
    private static void applyDelete(Connection conn, String table, JsonNode before) throws SQLException {
        String deleteSql = String.format("DELETE FROM %s WHERE id = ?", table);
        try (PreparedStatement pstmt = conn.prepareStatement(deleteSql)) {
            pstmt.setLong(1, before.get("id").asLong());
            pstmt.executeUpdate();
        }
    }
}
