/*
 * MIT License
 *
 * Copyright (c) 2024 org.zzz (https://gitee.com/frostforest)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

package org.zzz.pcdc;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.sql.DataSource;
import java.sql.*;
import java.time.Duration;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import static org.zzz.pcdc.PcdcBacklogDataVolumeStats.COL_MAX_UPDATED_AT;
import static org.zzz.pcdc.PcdcBacklogDataVolumeStats.COL_MIN_UPDATED_AT;
import static org.zzz.pcdc.PcdcThreadConfig.FORCE_SAVE_PROGRESS_SECONDS;
import static org.zzz.pcdc.PcdcUtils.*;

/**
 * Poorman's Change Data Capture (PCDC) For MySQL - 一个轻量级的MySQL变更数据捕获工具。
 * <p>
 * PCDC是一个轻量级、秒级延迟的基于查询的变更数据捕获(CDC)工具。
 * 它通过定期扫描目标表的复合索引(updated_at,id)来检测数据变更，并将这些变更
 * 传递给用户定义的处理器进行处理。
 * <p>
 * <b>主要特点：</b>
 * <ul>
 *   <li>轻量级设计，对数据库性能影响小</li>
 *   <li>秒级延迟，满足大多数近实时数据同步需求</li>
 *   <li>基于时间戳的变更检测</li>
 *   <li>支持分布式环境下的并发处理</li>
 *   <li>自动分区处理大量数据变更</li>
 * </ul>
 * <p>
 * <b>使用要求：</b>
 * <ol>
 *   <li>被监控表必须有复合索引 (updated_at,id)</li>
 *   <li>只支持数值型主键</li>
 *   <li>扫描周期不小于{@value PcdcThreadConfig#MIN_POLLING_INTERVAL_SECONDS}秒</li>
 * </ol>
 * <p>
 * PCDC使用进度追踪表(zzz_pcdc_progress_{your_table_name})来记录处理进度，
 * 确保数据处理的可靠性和一致性，即使在系统重启或故障后也能从上次处理的位置继续。
 * <p>
 * 推荐通过{@link PcdcBuilder}类来创建和配置Pcdc实例。
 *
 * @see PcdcBuilder
 * @see PcdcDataHandler
 * @see PcdcErrorHandler
 * @since 1.0
 */
public class Pcdc implements AutoCloseable {
    /** Column list of the progress table's composite index; used as a FORCE INDEX hint by the incomplete-row scan. */
    public static final String INDEX_IS_COMPLETE_INDEX = "is_complete,lock_expire_time,upper_boundary";
    private static final Logger LOGGER = LoggerFactory.getLogger(Pcdc.class);
    // Sentinels matching MySQL's DATETIME range, used as "unbounded" lower/upper markers.
    private static final String MYSQL_MIN_DATETIME = "1000-01-01 00:00:00";
    private static final String MYSQL_MAX_DATETIME = "9999-12-31 23:59:59";
    // Per-thread worker id; progress-row UPDATEs include "worker_id = ?" so only the owning worker can write a claimed row.
    private static final ThreadLocal<Integer> WORKER_ID_HOLDER = new ThreadLocal<>();
    // Max progress rows fetched per poll; also the capacity of the worker pool's queue.
    private static final int DEFAULT_PROGRESS_FETCH_SIZE = 20;
    // Retries before abandoning a time window that is blocked by open transactions.
    private static final int MAX_PENDING_BLOCK_RETRY = 3;
    final DataSource dataSource;
    final PcdcSequence sequence;
    // Name of the progress-tracking table (zzz_pcdc_progress_{your_table_name}).
    final String progressTableName;
    final Map<String, List<String>> progressTableIndex;
    // Index hint used when scanning the progress table for incomplete rows.
    final String isCompleteDotLockExpireTimeDotUpperBoundaryIndex;
    final Map<String, List<String>> canUseIndexMapInCdcTable;
    // Index hint (updated_at,id) used when scanning the monitored table for changes.
    final String updatedAtDotIdIndex;
    final PcdcTableConfig pcdcTableConfig;
    final PcdcThreadConfig pcdcThreadConfig;
    final PcdcDataHandler dataHandler;
    final PcdcErrorHandler errorHandler;
    private final ThreadPoolExecutor workerPool;
    // NOTE(review): lifecycle flags mutated without synchronization — presumably
    // driven from a single control thread; confirm before relying on visibility.
    private boolean isRunning = false;
    private boolean stopped = false;

    /**
     * Creates a Pcdc instance. Prefer {@link PcdcBuilder} for construction.
     *
     * @param dataSource       connection pool for the monitored database
     * @param pcdcTableConfig  table configuration; the primary key must be numeric
     * @param pcdcThreadConfig CDC threading configuration
     * @param dataHandler      callback invoked with each batch of changed rows
     * @param errorHandler     callback invoked when the data handler fails
     */
    Pcdc(DataSource dataSource,
         PcdcTableConfig pcdcTableConfig,
         PcdcThreadConfig pcdcThreadConfig,
         PcdcDataHandler dataHandler,
         PcdcErrorHandler errorHandler,
         String progressTableName,
         Map<String, List<String>> progressTableIndex,
         String isCompleteDotLockExpireTimeDotUpperBoundaryIndex,
         Map<String, List<String>> canUseIndexMapInCdcTable,
         String updatedAtDotIdIndex) {
        this.dataSource = dataSource;
        this.pcdcTableConfig = pcdcTableConfig;
        this.pcdcThreadConfig = pcdcThreadConfig;
        this.dataHandler = dataHandler;
        this.errorHandler = errorHandler;
        this.progressTableName = progressTableName;
        this.progressTableIndex = progressTableIndex;
        this.isCompleteDotLockExpireTimeDotUpperBoundaryIndex = isCompleteDotLockExpireTimeDotUpperBoundaryIndex;
        this.canUseIndexMapInCdcTable = canUseIndexMapInCdcTable;
        this.updatedAtDotIdIndex = updatedAtDotIdIndex;
        this.sequence = new PcdcSequence(dataSource);
        final int poolSize = pcdcThreadConfig.getWorkerPoolSize();
        // Fixed-size pool; the bounded queue caps in-flight progress records per poll cycle.
        this.workerPool = new ThreadPoolExecutor(
                poolSize, poolSize,
                180L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(DEFAULT_PROGRESS_FETCH_SIZE),
                new PcdcThreadFactory(pcdcTableConfig));
    }

    /**
     * Retrieves the current upper-boundary timestamp.
     * <p>
     * Computes the upper boundary that delimits the next CDC partition:
     * <ol>
     *   <li>Take the current time minus (seconds-of-day modulo
     *       {@link PcdcThreadConfig#getThrottleSeconds()}) so boundaries align
     *       to the throttle interval</li>
     *   <li>Compare with the table's max updated_at rounded up to the next
     *       interval, and take the smaller of the two</li>
     * </ol>
     * <p>
     * The value is returned only when it is greater than the largest
     * upper_boundary already stored in the progress table; otherwise
     * {@code null} is returned, meaning there is no new boundary to process.
     *
     * @return the computed upper boundary, or {@code null} when none is due
     * @throws SQLException if a database error occurs
     */
    private LocalDateTime retrieveCurrentUpperBoundary() throws SQLException {
        //-- round the current time down to the nearest throttle interval point
        //-- round the table's max updated_at up to the nearest throttle interval point
        //-- take the smaller of the two
        //-- filter: only values greater than the historical max boundary qualify
        // NOTE: "%%" survives .formatted() as a literal '%', i.e. SQL's MOD operator.
        String query = stripLineByLine("""
                WITH
                    curr_time AS (
                        SELECT CAST(CURRENT_TIMESTAMP() - INTERVAL 1 SECOND AS DATETIME) AS ts
                    ),
                    max_time AS (
                        SELECT COALESCE(MAX(`%s`), '%s') AS max_time
                        FROM `%s`
                    ),
                    curr_time_seconds AS (
                        SELECT
                            HOUR(ct.ts) * 3600 +
                            MINUTE(ct.ts) * 60 +
                            SECOND(ct.ts) AS total_seconds
                        FROM curr_time ct
                    ),
                    max_time_seconds AS (
                        SELECT
                            HOUR(mt.max_time) * 3600 +
                            MINUTE(mt.max_time) * 60 +
                            SECOND(mt.max_time) AS total_seconds
                        FROM max_time mt
                    )
                SELECT
                    CAST(
                        LEAST(
                            ct.ts - INTERVAL ((SELECT total_seconds FROM curr_time_seconds) %% ?) SECOND,
                            (
                                SELECT mt.max_time + INTERVAL
                                    (? - (SELECT total_seconds FROM max_time_seconds) %% ?)
                                SECOND
                                FROM max_time mt
                            )
                        ) AS DATETIME
                    ) AS upper_boundary
                FROM curr_time ct
                HAVING upper_boundary > (
                    SELECT COALESCE(MAX(upper_boundary), '%s')
                    FROM `%s`
                )               
                """).formatted(pcdcTableConfig.getUpdatedAtColumnName(),
                MYSQL_MIN_DATETIME,
                pcdcTableConfig.getCdcTableName(),
                MYSQL_MIN_DATETIME,
                getProgressTableName());
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            conn.setReadOnly(true);
            conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            try (PreparedStatement boundaryQueryStmt = conn.prepareStatement(
                    query)) {
                // All three placeholders receive the same throttle interval in seconds.
                boundaryQueryStmt.setInt(1, pcdcThreadConfig.getThrottleSeconds());
                boundaryQueryStmt.setInt(2, pcdcThreadConfig.getThrottleSeconds());
                boundaryQueryStmt.setInt(3, pcdcThreadConfig.getThrottleSeconds());
                boundaryQueryStmt.setFetchSize(1);
                try (java.sql.ResultSet rs = boundaryQueryStmt.executeQuery()) {
                    if (rs.next()) {
                        LocalDateTime localDateTime = rs.getObject(1, LocalDateTime.class);
                        return localDateTime;
                    }
                    return null;
                }
            }
        }
    }

    /**
     * Creates progress records for changes up to the nearest time boundary.
     * <p>
     * Returns 0 when {@link #retrieveCurrentUpperBoundary()} reports no new
     * boundary; otherwise delegates to
     * {@link #insertProgressByBoundary(LocalDateTime)} and returns the number
     * of records created.
     *
     * @return number of progress records created, 0 when nothing was due
     * @throws SQLException if a database error occurs
     */
    private int createProgressForChangesUpToNearestTimeBoundary() throws SQLException {
        final LocalDateTime boundary = retrieveCurrentUpperBoundary();
        // A null boundary means there is nothing new to enqueue this cycle.
        return boundary == null ? 0 : insertProgressByBoundary(boundary);
    }

    /**
     * Returns the progress row whose upper_boundary is closest below the
     * given boundary, or {@code null} when no earlier row exists.
     *
     * @param upperBoundary exclusive upper limit for the lookup
     * @throws SQLException if a database error occurs
     */
    private PcdcProgress findPrevRecord(LocalDateTime upperBoundary) throws SQLException {
        String sql = stripLineByLine("""
                SELECT 
                    * 
                FROM 
                    `%s` 
                WHERE
                    upper_boundary < ?
                ORDER BY upper_boundary DESC LIMIT 1""").formatted(getProgressTableName());
        try (Connection conn = dataSource.getConnection();
             PreparedStatement stmt = conn.prepareStatement(sql)) {
            stmt.setObject(1, upperBoundary);
            stmt.setFetchSize(1);
            try (ResultSet rs = stmt.executeQuery()) {
                return rs.next() ? PcdcProgress.from(rs) : null;
            }
        }
    }

    /**
     * Fetches up to {@code limit} incomplete progress rows whose lock lease
     * has expired, oldest upper_boundary first, using the composite index on
     * (is_complete, lock_expire_time, upper_boundary).
     *
     * @param limit maximum number of rows to return
     * @return the matching rows; empty when there is no pending work
     * @throws SQLException if a database error occurs
     */
    private List<PcdcProgress> findNotCompleteForExecute(int limit) throws SQLException {
        String sql = stripLineByLine("""
                select 
                    * 
                from
                    `%s`
                FORCE INDEX (%s)
                where
                    is_complete =0
                and lock_expire_time <= current_timestamp()
                order by upper_boundary asc limit ?
                """).formatted(getProgressTableName(),
                this.isCompleteDotLockExpireTimeDotUpperBoundaryIndex);
        final List<PcdcProgress> result = new ArrayList<>();
        try (Connection conn = dataSource.getConnection();
             PreparedStatement stmt = conn.prepareStatement(sql)) {
            stmt.setInt(1, limit);
            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    result.add(PcdcProgress.from(rs));
                }
            }
        }
        return result;
    }


    /**
     * Fetches the next batch of changed rows for the given progress record.
     * <p>
     * The lower boundary is the later of the previous record's upper boundary
     * (or the MySQL minimum datetime when there is no previous record) and the
     * record's own saved progress position, so interrupted runs resume where
     * they left off. Rows are read in (updated_at, id) order; rows sharing the
     * boundary timestamp are filtered by id to skip already-processed rows.
     *
     * @param currentPcdcProgress the progress record being processed
     * @param prevPcdcProgress    the preceding progress record, or {@code null}
     * @return up to {@code batchWorkSize} rows as column-name/value maps
     * @throws SQLException if a database error occurs
     */
    private List<Map<String, Object>> queryChangedData(PcdcProgress currentPcdcProgress,
                                                       PcdcProgress prevPcdcProgress) throws SQLException {

        LocalDateTime lowerBoundary;
        if (prevPcdcProgress == null) {
            lowerBoundary = Timestamp.valueOf(MYSQL_MIN_DATETIME).toLocalDateTime();
        } else {
            lowerBoundary = prevPcdcProgress.getUpperBoundary();
        }
        if (currentPcdcProgress.getProgressDatetime().isAfter(lowerBoundary)) {
            lowerBoundary = currentPcdcProgress.getProgressDatetime();
        }

        // No trailing semicolon: keeps this statement consistent with every other
        // query in the file and safe for server-side prepared statements, which
        // can reject a terminator inside the prepared text.
        String query = stripLineByLine("""
                SELECT 
                    %s
                FROM 
                    `%s`
                FORCE INDEX (%s)
                WHERE (`%s` = ? AND `%s` > ?)
                   OR (`%s` > ? AND `%s` < ?)
                ORDER BY `%s` ASC, `%s` ASC
                LIMIT ?
                """).formatted(
                pcdcTableConfig.getColumnsToReportOnChange(),
                pcdcTableConfig.getCdcTableName(),
                updatedAtDotIdIndex,
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getIdColumnName(),
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getIdColumnName()
        );

        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            try (PreparedStatement queryChangedStmt = conn.prepareStatement(query)) {
                queryChangedStmt.setObject(1, lowerBoundary);
                queryChangedStmt.setLong(2, currentPcdcProgress.getProgressId());
                queryChangedStmt.setObject(3, lowerBoundary);
                queryChangedStmt.setObject(4, currentPcdcProgress.getUpperBoundary());
                queryChangedStmt.setInt(5, pcdcThreadConfig.getBatchWorkSize());
                try (ResultSet rs = queryChangedStmt.executeQuery()) {
                    return PcdcResultSetHandler.toMapList(rs);
                }
            }
        }
    }


    /**
     * Attempts to claim an incomplete progress row for this worker by bumping
     * its lock_expire_time lease and stamping the current worker id. Only
     * rows whose lease has already expired can be claimed, which serializes
     * competing workers.
     *
     * @param pcdcProgress the row to claim
     * @return 1 when the claim succeeded, 0 when another worker holds the row
     * @throws SQLException if a database error occurs
     */
    private int lockRecordForExecute(PcdcProgress pcdcProgress) throws SQLException {
        String sql = stripLineByLine("""
                update 
                    `%s`
                set 
                    lock_expire_time = current_timestamp() + interval ? SECOND,
                    worker_id = ?
                where 
                    id = ?                    
                    and lock_expire_time <= current_timestamp()
                    and is_complete =0
                """).formatted(getProgressTableName());
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            try (PreparedStatement stmt = conn.prepareStatement(sql)) {
                stmt.setInt(1, pcdcThreadConfig.getBatchWorkTimeoutSeconds());
                stmt.setInt(2, WORKER_ID_HOLDER.get());
                stmt.setLong(3, pcdcProgress.getId());
                return stmt.executeUpdate();
            }
        }
    }

    /**
     * Persists the record's error counter after a data-handler failure.
     * The worker_id predicate fences the write so only the owning worker can
     * touch the row.
     *
     * @param pcdcProgress progress record carrying the new error counter
     * @throws SQLException     if a database error occurs
     * @throws RuntimeException when no row matched, i.e. the lock lease was
     *                          lost or another worker took over the row
     */
    private void updateErrorCounter(PcdcProgress pcdcProgress) throws SQLException {
        String query = stripLineByLine("""
                update 
                    `%s` 
                set 
                    error_counter= ? 
                where id = ? 
                and worker_id = ?
                """).formatted(getProgressTableName());
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            try (PreparedStatement updateErrCounterStmt = conn.prepareStatement(
                    query)) {
                updateErrCounterStmt.setShort(1, pcdcProgress.getErrorCounter());
                updateErrCounterStmt.setLong(2, pcdcProgress.getId());
                updateErrCounterStmt.setInt(3, WORKER_ID_HOLDER.get());
                int rows = updateErrCounterStmt.executeUpdate();
                if (rows == 0) {
                    // Message fixed: this statement updates error_counter, not the
                    // progress position (the old text was copy-pasted from updateProgress).
                    throw new RuntimeException("Failed to update error_counter for progress id=" + pcdcProgress.getId());
                }
            }
        }
    }

    /**
     * Persists the record's progress position (datetime/id), processed count
     * and completion flag, renewing the lock lease and resetting the error
     * counter. The worker_id predicate fences the write so only the owning
     * worker can touch the row.
     *
     * @param pcdcProgress the record whose state should be saved
     * @throws SQLException     if a database error occurs
     * @throws RuntimeException when no row matched (lock lease lost)
     */
    private void updateProgress(PcdcProgress pcdcProgress) throws SQLException {
        String sql = stripLineByLine("""
                update 
                    `%s`
                set 
                    progress_datetime = ?,
                    progress_id = ?,
                    lock_expire_time = current_timestamp() + interval ? SECOND,
                    processed_qty = ?,
                    is_complete = ?,
                    error_counter=0
                where 
                    id = ?                
                and worker_id = ?
                """).formatted(getProgressTableName());
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            try (PreparedStatement stmt = conn.prepareStatement(sql)) {
                int i = 0;
                stmt.setObject(++i, pcdcProgress.getProgressDatetime());
                stmt.setLong(++i, pcdcProgress.getProgressId());
                stmt.setInt(++i, pcdcThreadConfig.getBatchWorkTimeoutSeconds());
                stmt.setInt(++i, pcdcProgress.getProcessedQty());
                stmt.setBoolean(++i, pcdcProgress.getIsComplete());
                stmt.setLong(++i, pcdcProgress.getId());
                stmt.setInt(++i, WORKER_ID_HOLDER.get());
                if (stmt.executeUpdate() == 0) {
                    throw new RuntimeException("Failed to update progress");
                }
            }
        }
    }

    /**
     * Marks the progress row complete in a single UPDATE, also storing the
     * final progress position and processed count. Fenced by worker_id.
     *
     * @param pcdcProgress the record to finalize
     * @throws SQLException     if a database error occurs
     * @throws RuntimeException when no row matched (lock lease lost)
     */
    private void doComplete(PcdcProgress pcdcProgress) throws SQLException {
        String sql = stripLineByLine("""
                update
                    `%s`
                set
                    is_complete = 1,
                    progress_datetime = ?,
                    progress_id = ?,
                    processed_qty = ?
                where
                    id = ?
                and worker_id = ?
                """).formatted(getProgressTableName());
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            try (PreparedStatement stmt = conn.prepareStatement(sql)) {
                stmt.setObject(1, pcdcProgress.getProgressDatetime());
                stmt.setLong(2, pcdcProgress.getProgressId());
                stmt.setInt(3, pcdcProgress.getProcessedQty());
                stmt.setLong(4, pcdcProgress.getId());
                stmt.setInt(5, WORKER_ID_HOLDER.get());
                if (stmt.executeUpdate() == 0) {
                    throw new RuntimeException("Failed to finish");
                }
            }
        }
        LOGGER.debug("Completed processing for table: {}, record ID: {}", pcdcTableConfig.getCdcTableName(), pcdcProgress.getId());
    }


    /**
     * Probes whether any open (uncommitted) transaction still holds row locks
     * in the monitored table for the given updated_at range.
     * <p>
     * {@code SELECT ... FOR SHARE NOWAIT} fails immediately when any row in
     * the range is exclusively locked by an in-flight transaction. A
     * successful execution therefore means the range is quiescent; the
     * counted value itself is logged for diagnostics only and never drives
     * the return value.
     *
     * @param sLockFrom inclusive start of the updated_at range to probe
     * @param sLockUpTo exclusive end of the updated_at range to probe
     * @return {@code false} when the shared lock was acquired (no pending
     *         transactions); {@code true} when the probe query failed, which
     *         is treated as "pending transactions present"
     * @throws SQLException if obtaining or configuring the connection fails
     */
    private boolean hasPendingTransactions(LocalDateTime sLockFrom, LocalDateTime sLockUpTo) throws SQLException {
        String q = stripLineByLine("""
                SELECT
                    COUNT(*)
                FROM
                    `%s`
                WHERE
                    `%s` >= ?
                    AND `%s` < ?
                FOR SHARE NOWAIT
                """.formatted(pcdcTableConfig.getCdcTableName(),
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getUpdatedAtColumnName()));
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(true);
            conn.setReadOnly(true);
            conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            try (PreparedStatement countStmt = conn.prepareStatement(
                    q)) {
                countStmt.setObject(1, sLockFrom);
                countStmt.setObject(2, sLockUpTo);
                try (java.sql.ResultSet rs = countStmt.executeQuery()) {
                    rs.next();
                    int count = rs.getInt(1);
                    LOGGER.debug("Count of changes up sLockUpTo boundary: {}", count);
                    // Shared lock acquired without waiting: no pending transactions.
                    return false;
                } catch (Exception e) {
                    // A NOWAIT lock conflict (or any other failure of the probe)
                    // lands here; fall through and report the range as pending.
                    LOGGER.error("Error while counting changes up sLockUpTo boundary", e);
                }
            }
        }
        return true;
    }

    /**
     * Processes one claimed progress record end-to-end.
     * <p>
     * First waits (with bounded retries) for in-flight transactions inside
     * the record's time window to commit, then repeatedly pulls batches of
     * changed rows, hands them to the data handler, and advances the stored
     * progress until a short batch signals the window is exhausted.
     *
     * @param notCompleteProgress the progress record claimed by this worker
     * @throws SQLException         if a database error occurs
     * @throws InterruptedException if interrupted while backing off on a
     *                              pending-transaction block
     */
    private void checkUpdatesByTimeframe(PcdcProgress notCompleteProgress) throws SQLException, InterruptedException {
        PcdcProgress prevPcdcProgress = findPrevRecord(notCompleteProgress.getUpperBoundary());
        LocalDateTime timerStart = LocalDateTime.now();
        // The lock-probe range starts at the later of our saved progress
        // position and the previous record's upper boundary.
        LocalDateTime sLockFrom = notCompleteProgress.getProgressDatetime();
        if (prevPcdcProgress != null) {
            LocalDateTime lowerBoundary = prevPcdcProgress.getUpperBoundary();
            if (lowerBoundary.isAfter(sLockFrom)) {
                sLockFrom = lowerBoundary;
            }
        }
        LocalDateTime sLockUpTo = notCompleteProgress.getUpperBoundary();
        int retryCount = 0;
        // Give open transactions in the window a few chances to commit; if
        // they persist, abandon this record (it stays incomplete and will be
        // retried after its lock lease expires).
        while (hasPendingTransactions(sLockFrom, sLockUpTo)) {
            retryCount++;
            LOGGER.error("Blocked By Pending Transaction,table = {} and progressDatetime >= {} ", pcdcTableConfig.getCdcTableName(), sLockFrom);
            if (retryCount > MAX_PENDING_BLOCK_RETRY) {
                return;
            }
            TimeUnit.MILLISECONDS.sleep(500);
        }

        while (true) {
            if (Thread.currentThread().isInterrupted()) {
                return;
            }
            List<Map<String, Object>> watchedData = queryChangedData(notCompleteProgress, prevPcdcProgress);
            if (watchedData.isEmpty()) {
                // Nothing left in the window: mark the record done.
                doComplete(notCompleteProgress);
                return;
            }
            // The last row of the (updated_at, id)-ordered batch becomes the
            // new progress position.
            Map<String, Object> lastRow = getLast(watchedData);
            LocalDateTime newProgressDatetime =
                    ((LocalDateTime) (lastRow.get(pcdcTableConfig.getUpdatedAtColumnName())));
            Long newProgressId =
                    ((Number) lastRow.get(pcdcTableConfig.getIdColumnName())).longValue();

            try {
                dataHandler.onData(pcdcTableConfig, watchedData);

                int size = watchedData.size();
                int processedQty = notCompleteProgress.getProcessedQty() + size;
                // A batch smaller than batchWorkSize means the window is drained.
                boolean isComplete = size < pcdcThreadConfig.getBatchWorkSize();
                long elapsedSeconds = Duration.between(timerStart, LocalDateTime.now()).toSeconds();

                notCompleteProgress.setProcessedQty(processedQty);
                notCompleteProgress.setProgressDatetime(newProgressDatetime);
                notCompleteProgress.setProgressId(newProgressId);
                notCompleteProgress.setIsComplete(isComplete);
                long remainingTimeoutSeconds = (pcdcThreadConfig.getBatchWorkTimeoutSeconds() - elapsedSeconds);
                // Persist when done, when elapsed time passes half the lock
                // lease (remaining < elapsed), or after the forced-save interval.
                // NOTE(review): the middle condition reads like a half-lease
                // heuristic — confirm it is intentional.
                if (isComplete
                        || remainingTimeoutSeconds < elapsedSeconds
                        || elapsedSeconds >= FORCE_SAVE_PROGRESS_SECONDS) {
                    updateProgress(notCompleteProgress);
                    timerStart = LocalDateTime.now();
                }

                if (isComplete) {
                    LOGGER.debug("Completed batch processing for table: {}, record ID: {}, batch size: {}",
                            pcdcTableConfig.getCdcTableName(), notCompleteProgress.getId(), size);
                    return;
                }
            } catch (Exception e) {
                LOGGER.error("Data handler error occurred for table: {}, record ID: {}. Error details: {}",
                        pcdcTableConfig.getCdcTableName(), notCompleteProgress.getId(), e.getMessage(), e);

                // Record the failure, notify the error handler, then propagate.
                Short errorCounter = notCompleteProgress.getErrorCounter();
                notCompleteProgress.setErrorCounter((short) (errorCounter + 1));
                updateErrorCounter(notCompleteProgress);
                invokerErrorHandler(pcdcTableConfig, watchedData, e);
                throw new RuntimeException(e);
            }
        }
    }

    /**
     * Invokes the configured error handler, if any. Failures thrown by the
     * handler itself are logged and suppressed so they cannot mask the
     * original processing error.
     */
    private void invokerErrorHandler(PcdcTableConfig pcdcTableConfig,
                                     List<Map<String, Object>> watchedData,
                                     Exception e) {
        if (errorHandler == null) {
            return;
        }
        try {
            errorHandler.onError(pcdcTableConfig, watchedData, e);
        } catch (Exception ex) {
            LOGGER.error("Error handler execution failed for table: {}. Error details: {}",
                    pcdcTableConfig.getCdcTableName(), ex.getMessage(), ex);
        }
    }

    /**
     * Creates progress records for changes up to the nearest time boundary
     * and logs how many were created. Actual locking and processing of those
     * records happens later in {@link #handleDataUpdate()}.
     *
     * @throws SQLException if a database error occurs
     * @see #createProgressForChangesUpToNearestTimeBoundary()
     * @see #handleDataUpdate()
     */
    private void checkUpdatesUpToNearestTimeBoundary() throws SQLException {
        final int created = createProgressForChangesUpToNearestTimeBoundary();
        if (created > 0) {
            LOGGER.debug("Found {} new update(s) for table: {}", created, pcdcTableConfig.getCdcTableName());
        }
    }

    /**
     * Main dispatch loop for one polling interval: fetches incomplete
     * progress records and submits each to the worker pool, throttling while
     * the pool's queue is non-empty so a cycle's work drains before more is
     * fetched.
     *
     * @throws SQLException         if fetching work from the database fails
     * @throws InterruptedException if interrupted while waiting for the queue
     */
    private void handleDataUpdate() throws SQLException, InterruptedException {
        final LocalDateTime deadline = LocalDateTime.now()
                .plusSeconds(pcdcThreadConfig.getPollingIntervalSeconds());

        while (LocalDateTime.now().isBefore(deadline)) {
            // Let the previous batch drain before fetching more work.
            if (!workerPool.getQueue().isEmpty()) {
                TimeUnit.MILLISECONDS.sleep(500);
                continue;
            }
            List<PcdcProgress> notCompleteList = findNotCompleteForExecute(DEFAULT_PROGRESS_FETCH_SIZE);
            if (notCompleteList == null || notCompleteList.isEmpty()) {
                return;
            }
            for (PcdcProgress notCompleteProgress : notCompleteList) {
                workerPool.submit(() -> {
                    try {
                        initWorkerId();
                        if (lockRecordForExecute(notCompleteProgress) == 0) {
                            LOGGER.error("Failed to lock record for table: {}, record ID: {}",
                                    pcdcTableConfig.getCdcTableName(), notCompleteProgress.getId());
                            // Random back-off so workers stop contending for the same row.
                            TimeUnit.MILLISECONDS.sleep(ThreadLocalRandom.current().nextInt(3000));
                            return;
                        }
                        checkUpdatesByTimeframe(notCompleteProgress);
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag so pool shutdown is not swallowed.
                        Thread.currentThread().interrupt();
                        LOGGER.error("Error occurred while checking updates by timeframe for table: {}. Error details: {}",
                                pcdcTableConfig.getCdcTableName(), e.getMessage(), e);
                    } catch (Exception e) {
                        LOGGER.error("Error occurred while checking updates by timeframe for table: {}. Error details: {}",
                                pcdcTableConfig.getCdcTableName(), e.getMessage(), e);
                    }
                });
            }
        }
    }


    /**
     * Counts rows of the monitored table whose updated_at falls in
     * [start, end), forcing the (updated_at,id) index.
     *
     * @param start inclusive lower bound
     * @param end   exclusive upper bound
     * @return the row count, 0 when the result set is empty
     * @throws SQLException if a database error occurs
     */
    private int countRecordsInRange(LocalDateTime start, LocalDateTime end) throws SQLException {
        String sql = stripLineByLine("""
                select 
                    count(*)
                from 
                    `%s`
                FORCE INDEX (%s)    
                where 
                    `%s` >= ?
                and `%s` < ?
                """.formatted(pcdcTableConfig.getCdcTableName(),
                updatedAtDotIdIndex,
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getUpdatedAtColumnName()));
        try (Connection conn = dataSource.getConnection();
             PreparedStatement stmt = conn.prepareStatement(sql)) {
            stmt.setObject(1, start);
            stmt.setObject(2, end);
            try (ResultSet rs = stmt.executeQuery()) {
                return rs.next() ? rs.getInt(1) : 0;
            }
        }
    }

    /**
     * Computes the min/max updated_at of rows at or above the given boundary,
     * i.e. the time span of the backlog still to be partitioned.
     *
     * @param upperBoundary inclusive lower bound for the aggregate
     * @return the stats, or {@code null} when the query yields no row
     * @throws SQLException if a database error occurs
     */
    private PcdcBacklogDataVolumeStats calculateBacklogDataVolume(LocalDateTime upperBoundary) throws SQLException {
        String sql = stripLineByLine("""
                SELECT
                MAX(`%s`) max_updated_at,
                MIN(`%s`) min_updated_at
                FROM `%s`
                WHERE `%s` >= ?
                """.formatted(
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getUpdatedAtColumnName(),
                pcdcTableConfig.getCdcTableName(),
                pcdcTableConfig.getUpdatedAtColumnName()));
        try (Connection conn = dataSource.getConnection();
             PreparedStatement stmt = conn.prepareStatement(sql)) {
            stmt.setObject(1, upperBoundary);
            try (ResultSet rs = stmt.executeQuery()) {
                if (!rs.next()) {
                    return null;
                }
                PcdcBacklogDataVolumeStats stats = new PcdcBacklogDataVolumeStats();
                stats.setMaxUpdatedAt(rs.getObject(COL_MAX_UPDATED_AT, LocalDateTime.class));
                stats.setMinUpdatedAt(rs.getObject(COL_MIN_UPDATED_AT, LocalDateTime.class));
                return stats;
            }
        }
    }


    /**
     * Rounds the given timestamp down to the previous partition boundary.
     * A timestamp already exactly on a boundary is moved back a full
     * interval, so the result is always strictly earlier than the input.
     *
     * @param maxUpdatedAt the timestamp to round down
     * @return the strictly earlier, boundary-aligned timestamp
     */
    private LocalDateTime getNearestPartitionBoundary(LocalDateTime maxUpdatedAt) {
        final int interval = pcdcThreadConfig.getMinPartitionIntervalSeconds();
        final int secondsIntoDay = maxUpdatedAt.getHour() * 3600
                + maxUpdatedAt.getMinute() * 60
                + maxUpdatedAt.getSecond();
        final int remainder = secondsIntoDay % interval;
        return maxUpdatedAt.minusSeconds(remainder == 0 ? interval : remainder);
    }


    /**
     * Partitions backlog data into time-ranged progress records on startup.
     * <p>
     * Determines the unprocessed time span — from the highest upper_boundary
     * already recorded in the progress table (or the monitored table's
     * earliest updated_at when there is none) up to the latest partition
     * boundary — estimates a partition span in days from one day's record
     * volume, and delegates each span to
     * {@link #partitionDataAdaptive(LocalDateTime, LocalDateTime)}.
     * Splitting history this way lets multiple workers process old data in
     * parallel and keeps each batch bounded.
     *
     * @throws SQLException if a database error occurs
     * @see #partitionDataAdaptive(LocalDateTime, LocalDateTime)
     * @see #insertProgressByBoundary(LocalDateTime)
     */
    private void partitionOnStartup() throws SQLException {
        // Resume from the highest boundary already recorded, or from the MySQL
        // minimum datetime when the progress table is empty.
        LocalDateTime maxUpperBoundary = Timestamp.valueOf(MYSQL_MIN_DATETIME).toLocalDateTime();
        PcdcProgress latestRecord = findPrevRecord(Timestamp.valueOf(MYSQL_MAX_DATETIME).toLocalDateTime());
        if (latestRecord != null) {
            maxUpperBoundary = latestRecord.getUpperBoundary();
        }
        PcdcBacklogDataVolumeStats stats = calculateBacklogDataVolume(maxUpperBoundary);
        if (stats == null) {
            return;
        }
        LocalDateTime maxUpdatedAt = stats.getMaxUpdatedAt();
        LocalDateTime minUpdatedAt = stats.getMinUpdatedAt();
        if (minUpdatedAt == null || maxUpdatedAt == null) {
            // Empty backlog: nothing to partition.
            return;
        }
        LocalDateTime start = maxUpperBoundary;
        LocalDateTime startTimeBoundary = getNearestPartitionBoundary(minUpdatedAt);
        if (maxUpperBoundary.isBefore(startTimeBoundary)) {
            start = startTimeBoundary;
        }
        LocalDateTime endTimeBoundary = getNearestPartitionBoundary(maxUpdatedAt);
        if (!start.isBefore(endTimeBoundary)) {
            return;
        }
        // Estimate the partition span (in days) from one day's record volume.
        int recordsInOneDay = countRecordsInRange(start, start.plusDays(1));
        int spanDays = ceilDivision(pcdcThreadConfig.getPartitionThreshold(), recordsInOneDay);
        while (start.isBefore(endTimeBoundary)) {
            LocalDateTime nextBoundary = start.plusDays(spanDays);
            if (endTimeBoundary.isBefore(nextBoundary)) {
                nextBoundary = endTimeBoundary;
            }
            partitionDataAdaptive(start, nextBoundary);
            start = nextBoundary;
        }
    }


    /**
     * Recursively splits the range (start, end] into partitions whose record
     * counts fit the configured partition threshold, inserting one progress
     * record per resulting partition via {@link #insertProgressByBoundary}.
     *
     * @param start lower bound of the range to partition (must be before end)
     * @param end   upper bound of the range to partition
     * @throws IllegalArgumentException if start is not before end
     * @throws SQLException             if a database error occurs
     */
    private void partitionDataAdaptive(LocalDateTime start,
                                       LocalDateTime end) throws SQLException {
        if (!start.isBefore(end)) {
            throw new IllegalArgumentException("start must be before end");
        }
        int totalRecordCount = countRecordsInRange(start, end);
        if (totalRecordCount == 0) {
            // Empty range: no progress record is needed.
            return;
        }
        final int minTimeSpan = pcdcThreadConfig.getMinPartitionIntervalSeconds();
        final int partitionMaxSize = pcdcThreadConfig.getPartitionThreshold();

        long seconds = Duration.between(start, end).toSeconds();
        // Stop splitting when the span has shrunk to the minimum interval,
        // or the record count already fits into a single partition.
        if (seconds <= minTimeSpan || totalRecordCount <= partitionMaxSize) {
            insertProgressByBoundary(end);
            return;
        }

        int partitionCount = ceilDivision(totalRecordCount, partitionMaxSize);
        // Ceiling division always yields at least 2 here, because
        // totalRecordCount > partitionMaxSize on this path.
        assert partitionCount >= 2;
        // deltaSeconds must be at least minTimeSpan (hence > 1); otherwise a
        // single second holding more than partitionMaxSize records would
        // recurse infinitely.
        int seg = ceilDivision(seconds, partitionCount);
        int rounded = (seg / minTimeSpan) * minTimeSpan;
        int deltaSeconds = Math.max(rounded, minTimeSpan);

        assert deltaSeconds < seconds;
        // NOTE(review): `i` keeps growing while `start` also advances, so the
        // slices widen each iteration (delta, 2*delta, 3*delta, ...). Oversized
        // slices are re-split by the recursive call, so this still terminates —
        // but confirm the widening is intentional rather than a fixed-size
        // `start.plusSeconds(deltaSeconds)` that was meant.
        for (int i = deltaSeconds; start.isBefore(end); i += deltaSeconds) {
            LocalDateTime nextBoundary = start.plusSeconds(i);
            if (nextBoundary.isAfter(end)) {
                nextBoundary = end;
            }
            partitionDataAdaptive(start, nextBoundary);
            start = nextBoundary;
        }
    }

    /**
     * Inserts a progress record for the given upper boundary.
     * <p>
     * Uses {@code INSERT IGNORE} so the call is idempotent: an existing row
     * for the same boundary is left untouched and zero is returned.
     *
     * @param newUpperBoundary the boundary to record
     * @return the number of rows inserted (0 when the boundary already exists)
     * @throws SQLException if a database error occurs
     */
    private int insertProgressByBoundary(LocalDateTime newUpperBoundary) throws SQLException {
        final String sql = stripLineByLine("""
                INSERT IGNORE INTO `%s`
                    (upper_boundary)
                VALUES
                    (?)
                """).formatted(getProgressTableName());

        try (Connection connection = dataSource.getConnection()) {
            connection.setAutoCommit(true);
            try (PreparedStatement stmt = connection.prepareStatement(sql)) {
                stmt.setObject(1, newUpperBoundary);
                final int affectedRows = stmt.executeUpdate();
                if (affectedRows == 0) {
                    LOGGER.debug("Progress already exists for table: {}, boundary: {}",
                            pcdcTableConfig.getCdcTableName(), newUpperBoundary);
                }
                return affectedRows;
            }
        }
    }


    /** Lazily assigns a worker id to the current thread on first use. */
    private void initWorkerId() {
        if (WORKER_ID_HOLDER.get() == null) {
            WORKER_ID_HOLDER.set(sequence.getWorkerId());
        }
    }

    /**
     * Starts the PCDC background monitoring thread.
     * <p>
     * Launches a daemon thread that periodically checks the target table for
     * data changes and dispatches them for processing. On startup the thread:
     * <ol>
     *   <li>Initializes its worker id</li>
     *   <li>Performs initial partitioning of the backlog</li>
     *   <li>Enters the polling loop, sleeping between polls according to the
     *       configured polling interval</li>
     * </ol>
     * <p>
     * The monitor thread runs until {@link #shutdown()} is invoked, the
     * thread is interrupted, or the application terminates. If this instance
     * is already running or has been stopped, an error is logged and the call
     * returns without starting anything.
     * <p>
     * This method is non-blocking: it starts the background thread and
     * returns immediately.
     *
     * @throws SQLException     if a database error occurs during initialization
     * @throws RuntimeException if worker id initialization fails
     * @see #shutdown()
     * @since 1.0
     */
    public void startup() throws SQLException {

        synchronized (this) {
            if (stopped) {
                LOGGER.error("PCDC process already stopped for table: {}. Cannot start process.", pcdcTableConfig.getCdcTableName());
                return;
            }
            if (isRunning) {
                LOGGER.error("PCDC process already running for table: {}. Cannot start duplicate process.", pcdcTableConfig.getCdcTableName());
                return;
            }
            isRunning = true;
        }

        try {
            initWorkerId();
        } catch (Exception e) {
            LOGGER.error("Failed to initialize worker id for table: {}. Error details: {}",
                    pcdcTableConfig.getCdcTableName(),
                    e.getMessage(),
                    e);
            throw new RuntimeException(e);
        }
        Thread t = new Thread(() -> {
            try {
                initWorkerId();
                Thread.currentThread()
                        .setName(PcdcThreadFactory.masterThreadPrefix(pcdcTableConfig)
                                + WORKER_ID_HOLDER.get());
                partitionOnStartup();
            } catch (Exception e) {
                LOGGER.error("Failed to initialize partitions for table: {}. Error details: {}",
                        pcdcTableConfig.getCdcTableName(), e.getMessage(), e);
            }
            // BUG FIX: was `while (true)` — the loop never observed
            // shutdown(), so the monitor thread kept polling after the
            // instance was stopped.
            // NOTE(review): isRunning is read here without synchronization;
            // it should be declared volatile — confirm its declaration.
            while (isRunning) {
                try {
                    LocalDateTime start = LocalDateTime.now();
                    LOGGER.debug("Starting update check for table: {}", pcdcTableConfig.getCdcTableName());
                    try {
                        checkUpdatesUpToNearestTimeBoundary();
                        handleDataUpdate();
                    } catch (Exception e) {
                        LOGGER.error("PCDC processing error occurred for table: {}. Error details: {}", pcdcTableConfig.getCdcTableName(), e.getMessage(), e);
                    }
                    long seconds = Duration.between(start, LocalDateTime.now()).toSeconds();
                    if (seconds < pcdcThreadConfig.getPollingIntervalSeconds()) {
                        TimeUnit.SECONDS.sleep(pcdcThreadConfig.getPollingIntervalSeconds() - seconds);
                    }
                } catch (InterruptedException e) {
                    // BUG FIX: was silently swallowed, clearing the interrupt
                    // flag. Restore it and treat interruption as a stop signal.
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        });
        t.setDaemon(true);
        t.start();

    }

    /**
     * Closes this PCDC instance and releases its resources.
     * <p>
     * Implements {@link AutoCloseable}, so a Pcdc instance can be managed by
     * a try-with-resources statement; this call simply delegates to
     * {@link #shutdown()}.
     * <p>
     * Example:
     * <pre>
     * try (Pcdc pcdc = new PcdcBuilder(dataSource, "my_table")
     *     .dataHandler(myDataHandler)
     *     .build()) {
     *     pcdc.startup();
     *     // application logic...
     * } // close() is invoked automatically at the end of the try block
     * </pre>
     *
     * @see #shutdown()
     * @see AutoCloseable#close()
     * @since 1.0
     */
    @Override
    public void close() {
        this.shutdown();
    }

    /** @return the name of the table storing PCDC progress records */
    private String getProgressTableName() {
        return this.progressTableName;
    }


    /**
     * Stops the PCDC background monitoring thread.
     * <p>
     * Safely shuts this instance down by:
     * <ol>
     *   <li>Clearing the running flag so the monitor loop can stop</li>
     *   <li>Marking the instance as stopped, preventing a restart</li>
     *   <li>Shutting down the worker pool immediately</li>
     * </ol>
     * <p>
     * This method is thread-safe ({@code synchronized}). It is a no-op when
     * the instance was never started or has already been shut down.
     * <p>
     * Note: once shut down, a new Pcdc instance must be created to restart
     * PCDC.
     *
     * @see #startup()
     * @see #close()
     * @since 1.0
     */
    public synchronized void shutdown() {
        // No-op unless the instance is currently running and not yet stopped;
        // in particular, `stopped` is only ever set from the running state.
        if (stopped || !isRunning) {
            return;
        }
        isRunning = false;
        stopped = true;
        workerPool.shutdownNow();
    }
}
