// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package frontend

import (
	"context"
	"encoding/json"
	"regexp"
	"strconv"
	"sync"
	"time"

	"github.com/matrixorigin/matrixone/pkg/objectio"

	"github.com/matrixorigin/matrixone/pkg/catalog"
	"github.com/matrixorigin/matrixone/pkg/cdc"
	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/container/types"
	"github.com/matrixorigin/matrixone/pkg/defines"
	"github.com/matrixorigin/matrixone/pkg/fileservice"
	"github.com/matrixorigin/matrixone/pkg/logutil"
	"github.com/matrixorigin/matrixone/pkg/pb/task"
	"github.com/matrixorigin/matrixone/pkg/taskservice"
	"github.com/matrixorigin/matrixone/pkg/txn/client"
	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
	v2 "github.com/matrixorigin/matrixone/pkg/util/metric/v2"
	"github.com/matrixorigin/matrixone/pkg/vm/engine"
	"go.uber.org/zap"
)

// CDCExectorError_QueryDaemonTaskTimeout is attached as the context cause when
// the daemon-task lookup in CDCTaskExecutorFactory exceeds its 5s budget.
// NOTE(review): "Exector" is a typo for "Executor"; the name is exported, so
// renaming it would break callers outside this file.
var CDCExectorError_QueryDaemonTaskTimeout = moerr.NewInternalErrorNoCtx("query daemon task timeout")

// CDCExeutorAllocator is the package-wide memory pool shared by all CDC task
// executors; it is created in init below.
// NOTE(review): "Exeutor" is a typo for "Executor"; exported, kept as-is.
var CDCExeutorAllocator *mpool.MPool

// init creates the package-wide CDC executor memory pool. It panics on
// failure because no CDC task can run without an allocator.
func init() {
	var err error
	// A previous mpool.DeleteMPool(CDCExeutorAllocator) call here was removed:
	// package-level var initialization runs before init(), so the pool is
	// always nil at this point and the delete was a guaranteed no-op.
	if CDCExeutorAllocator, err = mpool.NewMPool("cdc_executor", 0, mpool.NoFixed); err != nil {
		panic(err)
	}
}

// CDCTaskExecutorFactory returns a taskservice.TaskExecutor that, for a given
// task spec, loads the matching daemon task, builds a CDCTaskExecutor from its
// CreateCdc details, attaches it to the task framework, and starts it.
func CDCTaskExecutorFactory(
	logger *zap.Logger,
	sqlExecutorFactory func() ie.InternalExecutor,
	attachToTask func(context.Context, uint64, taskservice.ActiveRoutine) error,
	cnUUID string,
	ts taskservice.TaskService,
	fs fileservice.FileService,
	txnClient client.TxnClient,
	txnEngine engine.Engine,
) taskservice.TaskExecutor {
	return func(ctx context.Context, spec task.Task) error {
		// Bound the daemon-task lookup to 5 seconds; a timeout surfaces as the
		// dedicated query-timeout cause.
		queryCtx, cancel := context.WithTimeoutCause(
			ctx, time.Second*5, CDCExectorError_QueryDaemonTaskTimeout,
		)
		defer cancel()

		found, err := ts.QueryDaemonTask(
			queryCtx,
			taskservice.WithTaskIDCond(taskservice.EQ, spec.GetID()),
		)
		if err != nil {
			return err
		}
		if len(found) != 1 {
			return moerr.NewInternalErrorf(ctx, "invalid tasks count %d", len(found))
		}

		// The daemon task must carry CreateCdc details; anything else is a
		// scheduling bug.
		details, ok := found[0].Details.Details.(*task.Details_CreateCdc)
		if !ok {
			return moerr.NewInternalError(ctx, "invalid details type")
		}

		executor := NewCDCTaskExecutor(
			logger,
			sqlExecutorFactory(),
			details.CreateCdc,
			cnUUID,
			fs,
			txnClient,
			txnEngine,
			CDCExeutorAllocator,
		)
		executor.activeRoutine = cdc.NewCdcActiveRoutine()
		if err = attachToTask(ctx, spec.GetID(), executor); err != nil {
			return err
		}
		return executor.Start(ctx)
	}
}

// CDCTaskExecutor drives a single CDC (change data capture) task: it owns the
// per-table execution pipelines (reader -> sinker -> downstream) and reacts to
// control operations (Pause/Cancel/Resume/Restart) issued via taskservice.
// The embedded mutex serializes pipeline creation in handleNewTables.
type CDCTaskExecutor struct {
	sync.Mutex

	logger *zap.Logger
	// ie executes internal SQL for task metadata (watermarks, state, err_msg).
	ie ie.InternalExecutor

	// identity and handles of the hosting CN
	cnUUID      string
	cnTxnClient client.TxnClient
	cnEngine    engine.Engine
	fileService fileservice.FileService

	// spec is the task definition produced by CREATE CDC.
	spec *task.CreateCdcDetails

	// mp is the memory pool shared by this executor's pipelines.
	mp *mpool.MPool
	// packerPool recycles key packers across pipelines.
	packerPool *fileservice.Pool[*types.Packer]

	// sinkUri describes the downstream sink connection.
	sinkUri cdc.UriInfo
	// tables holds the source/sink table patterns to capture.
	tables cdc.PatternTuples
	// exclude, if non-nil, filters out tables whose "db.table" key matches.
	exclude *regexp.Regexp
	// startTs/endTs bound the sync window.
	startTs, endTs types.TS
	// noFull skips the full (snapshot) sync and starts from the txn snapshot.
	noFull           bool
	additionalConfig map[string]interface{}

	activeRoutine *cdc.ActiveRoutine
	// watermarkUpdater update the watermark of the items that has been sunk to downstream
	watermarkUpdater *cdc.CDCWatermarkUpdater
	// runningReaders store the running execute pipelines, map key pattern: db.table
	runningReaders *sync.Map

	// stateMachine manages executor state transitions
	stateMachine *ExecutorStateMachine
	// holdCh is what Start blocks on; Pause/Cancel/Restart send to release it.
	holdCh chan int

	// start wrapper, for ut
	startFunc func(ctx context.Context) error
}

// NewCDCTaskExecutor builds a CDCTaskExecutor wired to the given CN handles.
// The returned executor is not yet running; the caller attaches it to the
// task framework and invokes Start.
//
// Parameters mirror the struct fields; cdcMp is the shared memory pool and
// the packer pool is sized at 128 entries with reset-on-reuse semantics.
func NewCDCTaskExecutor(
	logger *zap.Logger,
	ie ie.InternalExecutor,
	spec *task.CreateCdcDetails,
	cnUUID string,
	fileService fileservice.FileService,
	cnTxnClient client.TxnClient,
	cnEngine engine.Engine,
	cdcMp *mpool.MPool,
) *CDCTaskExecutor {
	// NOTE: the local was previously named "task", shadowing the imported
	// "task" package within this function; renamed to avoid confusion.
	executor := &CDCTaskExecutor{
		logger:      logger,
		ie:          ie,
		spec:        spec,
		cnUUID:      cnUUID,
		fileService: fileService,
		cnTxnClient: cnTxnClient,
		cnEngine:    cnEngine,
		mp:          cdcMp,
		packerPool: fileservice.NewPool(
			128,
			func() *types.Packer {
				return types.NewPacker()
			},
			func(packer *types.Packer) {
				packer.Reset()
			},
			func(packer *types.Packer) {
				packer.Close()
			},
		),
		stateMachine: NewExecutorStateMachine(), // Initialize state machine
		holdCh:       make(chan int, 1),         // Initialize holdCh to prevent race condition
	}
	// startFunc indirection exists so unit tests can stub out Start.
	executor.startFunc = executor.Start
	return executor
}

// Start runs the CDC task until it is released by Pause/Cancel/Restart or the
// context is done. It loads the task definition, cleans up any readers left
// from a previous run, registers with the table detector, drives the state
// machine Starting -> Running, then blocks on holdCh. On error, the deferred
// handler unwinds the registration/state, closes dangling routines, and
// persists the error message.
func (exec *CDCTaskExecutor) Start(rootCtx context.Context) (err error) {
	taskId := exec.spec.TaskId
	taskName := exec.spec.TaskName
	cnUUID := exec.cnUUID
	accountId := uint32(exec.spec.Accounts[0].GetId())
	detector := cdc.GetTableDetector(cnUUID)
	var (
		// registered: task has been registered with the detector and must be
		// unregistered on failure.
		registered bool
		// enteredStarting: state machine reached Starting, so failure must
		// transition it to Failed.
		enteredStarting bool
	)

	// Check if this task is already registered in TableDetector
	// This prevents duplicate task execution when taskservice schedules the same task twice
	if detector.IsTaskRegistered(taskId) {
		logutil.Warn(
			"cdc.frontend.task.already_registered",
			zap.String("task-id", taskId),
			zap.String("task-name", taskName),
			zap.String("cn-uuid", cnUUID),
			zap.Uint32("account-id", accountId),
			zap.String("reason", "task is already registered in TableDetector, skipping duplicate start"),
		)
		return moerr.NewInternalErrorf(rootCtx, "task %s is already running", taskId)
	}

	defer func() {
		if err != nil {
			if registered {
				detector.UnRegister(taskId)
			}

			// Transition to Failed state only if we entered Starting state
			if enteredStarting {
				if setFailErr := exec.stateMachine.SetFailed(err.Error()); setFailErr != nil {
					logutil.Warn(
						"cdc.frontend.task.set_state_failed",
						zap.String("target-state", StateFailed.String()),
						zap.Error(setFailErr),
					)
				}

				// Metrics: task failed
				v2.CdcTaskTotalGauge.WithLabelValues("failed").Inc()
				v2.CdcTaskErrorCounter.WithLabelValues("start_failed", "false").Inc()
			}

			// if Start failed, there will be some dangle goroutines(watermarkUpdater, reader, sinker...)
			// need to close them to avoid goroutine leak
			exec.activeRoutine.ClosePause()
			exec.activeRoutine.CloseCancel()

			updateErrMsgErr := exec.updateErrMsg(rootCtx, err.Error())
			logutil.Error(
				"cdc.frontend.task.start_failed",
				zap.String("task-id", taskId),
				zap.String("task-name", taskName),
				zap.String("state", exec.stateMachine.State().String()),
				zap.Error(err),
				zap.NamedError("update-err-msg-err", updateErrMsgErr),
			)
		}
	}()

	ctx := defines.AttachAccountId(rootCtx, accountId)

	// get cdc task definition
	if err = exec.retrieveCdcTask(ctx); err != nil {
		return err
	}

	// Collect the source db/table names for detector registration.
	dbs := make([]string, 0, len(exec.tables.Pts))
	tables := make([]string, 0, len(exec.tables.Pts))
	for _, pt := range exec.tables.Pts {
		dbs = append(dbs, pt.Source.Database)
		tables = append(tables, pt.Source.Table)
	}

	// Clean up old readers instead of replacing the map
	// This ensures old readers are properly stopped and prevents goroutine leaks
	if exec.runningReaders != nil {
		// Pass 1: signal every reader to stop.
		exec.runningReaders.Range(func(key, value interface{}) bool {
			reader := value.(cdc.ChangeReader)
			reader.Close()
			return true
		})

		// Pass 2: wait for each reader to fully exit.
		exec.runningReaders.Range(func(key, value interface{}) bool {
			reader := value.(cdc.ChangeReader)
			reader.Wait()
			return true
		})

		// Pass 3: clear the map.
		exec.runningReaders.Range(func(key, value interface{}) bool {
			exec.runningReaders.Delete(key)
			return true
		})
	} else {
		exec.runningReaders = &sync.Map{}
	}

	// start watermarkUpdater
	exec.watermarkUpdater = cdc.GetCDCWatermarkUpdater(exec.cnUUID, exec.ie)

	// register to table scanner
	if !detector.RegisterIfAbsent(taskId, accountId, dbs, tables, exec.handleNewTables) {
		logutil.Warn(
			"cdc.frontend.task.duplicate_registration_detected",
			zap.String("task-id", taskId),
			zap.String("task-name", taskName),
			zap.String("cn-uuid", cnUUID),
			zap.Uint32("account-id", accountId),
			zap.String("reason", "RegisterIfAbsent rejected duplicate task"),
		)
		return moerr.NewInternalErrorf(ctx, "task %s is already running", taskId)
	}
	registered = true

	// Transition to Starting state (skip if already Starting, e.g., from Resume)
	if exec.stateMachine.State() != StateStarting {
		if err = exec.stateMachine.Transition(TransitionStart); err != nil {
			detector.UnRegister(taskId)
			registered = false
			return moerr.NewInternalErrorf(ctx, "cannot start: %v", err)
		}
	}
	enteredStarting = true

	logutil.Info(
		"cdc.frontend.task.start",
		zap.String("task-id", taskId),
		zap.String("task-name", taskName),
		zap.String("cn-uuid", cnUUID),
		zap.Uint32("account-id", accountId),
		zap.String("state", exec.stateMachine.State().String()),
	)

	// Transition to Running state
	if err = exec.stateMachine.Transition(TransitionStartSuccess); err != nil {
		return moerr.NewInternalErrorf(ctx, "cannot transition to running: %v", err)
	}

	// Metrics: task started
	v2.CdcTaskTotalGauge.WithLabelValues("running").Inc()
	v2.CdcTaskStateChangeCounter.WithLabelValues("starting", "running").Inc()

	// start success, clear err msg
	clearErrMsgErr := exec.updateErrMsg(ctx, "")

	logutil.Info(
		"cdc.frontend.task.start_success",
		zap.String("task-id", taskId),
		zap.String("task-name", taskName),
		zap.String("state", exec.stateMachine.State().String()),
		zap.NamedError("clear-err-msg-err", clearErrMsgErr),
	)

	// hold - wait for Pause/Cancel/Restart signal
	select {
	case <-ctx.Done():
		break
	case <-exec.holdCh:
		break
	}
	return
}

// Resume cdc task from last recorded watermark
func (exec *CDCTaskExecutor) Resume() error {
	// Transition to Starting state (via Resume transition)
	if err := exec.stateMachine.Transition(TransitionResume); err != nil {
		return moerr.NewInternalErrorf(context.Background(), "cannot resume: %v", err)
	}

	// Log watermark states before resume
	exec.logCurrentWatermarks("before_resume")

	logutil.Info(
		"cdc.frontend.task.resume_start",
		zap.String("task-id", exec.spec.TaskId),
		zap.String("task-name", exec.spec.TaskName),
		zap.String("state", exec.stateMachine.State().String()),
	)
	defer func() {
		// Metrics: task resumed
		v2.CdcTaskTotalGauge.WithLabelValues("paused").Dec()
		v2.CdcTaskStateChangeCounter.WithLabelValues("paused", "starting").Inc()

		logutil.Info(
			"cdc.frontend.task.resume_success",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("task-name", exec.spec.TaskName),
			zap.String("state", exec.stateMachine.State().String()),
		)
	}()

	// Clear all table errors before resuming
	// This allows tables with non-retryable errors to be retried after user fixes the issues
	ctx := defines.AttachAccountId(context.Background(), uint32(exec.spec.Accounts[0].GetId()))
	if err := exec.clearAllTableErrors(ctx); err != nil {
		logutil.Warn(
			"cdc.frontend.task.resume_clear_errors_failed",
			zap.String("task-id", exec.spec.TaskId),
			zap.Error(err),
		)
		// Don't fail Resume if clearing errors fails - continue anyway
	}

	// FIX: Unmark task as paused to allow watermark updates
	if exec.watermarkUpdater != nil {
		exec.watermarkUpdater.UnmarkTaskPaused(exec.spec.TaskId)
	}

	go func() {
		// closed in Pause, need renew
		exec.activeRoutine = cdc.NewCdcActiveRoutine()
		if err := exec.startFunc(context.Background()); err != nil {
			logutil.Error(
				"cdc.frontend.task.resume_start_failed",
				zap.String("task-id", exec.spec.TaskId),
				zap.String("task-name", exec.spec.TaskName),
				zap.String("state", exec.stateMachine.State().String()),
				zap.Error(err),
			)
		} else {
			// Log watermark states after resume completed
			exec.logCurrentWatermarks("after_resume")
		}
	}()
	return nil
}

// Restart cdc task from init watermark
func (exec *CDCTaskExecutor) Restart() error {
	// Transition to Restarting state
	if err := exec.stateMachine.Transition(TransitionRestart); err != nil {
		return moerr.NewInternalErrorf(context.Background(), "cannot restart: %v", err)
	}

	// FIX: Unmark task as paused to allow watermark updates after restart
	// Without this, if task was paused before restart, it would remain in pausedTasks
	// and all watermark updates would be blocked, causing CDC to stop working
	if exec.watermarkUpdater != nil {
		exec.watermarkUpdater.UnmarkTaskPaused(exec.spec.TaskId)
	}

	logutil.Info(
		"cdc.frontend.task.restart_start",
		zap.String("task-id", exec.spec.TaskId),
		zap.String("task-name", exec.spec.TaskName),
		zap.String("state", exec.stateMachine.State().String()),
	)
	defer func() {
		logutil.Info(
			"cdc.frontend.task.restart_success",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("task-name", exec.spec.TaskName),
			zap.String("state", exec.stateMachine.State().String()),
		)
	}()

	if exec.stateMachine.IsRunning() {
		cdc.GetTableDetector(exec.cnUUID).UnRegister(exec.spec.TaskId)
		exec.activeRoutine.CloseCancel()
		// let Start() go
		select {
		case <-exec.holdCh:
		default:
		}
		select {
		case exec.holdCh <- 1:
		default:
		}
	}

	// Transition to Starting state (beginning restart)
	if err := exec.stateMachine.Transition(TransitionRestartBegin); err != nil {
		return moerr.NewInternalErrorf(context.Background(), "cannot begin restart: %v", err)
	}

	go func() {
		exec.activeRoutine = cdc.NewCdcActiveRoutine()
		if err := exec.startFunc(context.Background()); err != nil {
			logutil.Error(
				"cdc.frontend.task.restart_start_failed",
				zap.String("task-id", exec.spec.TaskId),
				zap.String("task-name", exec.spec.TaskName),
				zap.String("state", exec.stateMachine.State().String()),
				zap.Error(err),
			)
		}
	}()
	return nil
}

// Pause cdc task
func (exec *CDCTaskExecutor) Pause() error {
	// Check if running before state transition
	wasRunning := exec.stateMachine.IsRunning()

	// Transition to Pausing state
	if err := exec.stateMachine.Transition(TransitionPause); err != nil {
		return moerr.NewInternalErrorf(context.Background(), "cannot pause: %v", err)
	}

	// FIX: Mark task as paused ASAP to maximize blocking window
	// This prevents watermark updates from commits that start after pause signal
	// Trade-off: May block legitimate commits during stopAllReaders (causing data duplication)
	// but prevents data loss which is more severe
	// CDC design: duplication is acceptable (handled by downstream), loss is not
	if exec.watermarkUpdater != nil {
		exec.watermarkUpdater.MarkTaskPaused(exec.spec.TaskId)
	}

	// Log watermark states for all running tables before pause
	exec.logCurrentWatermarks("before_pause")

	pauseStartTime := time.Now()
	logutil.Info(
		"cdc.frontend.task.pause_start",
		zap.String("task-id", exec.spec.TaskId),
		zap.String("task-name", exec.spec.TaskName),
		zap.String("state", exec.stateMachine.State().String()),
		zap.Bool("was-running", wasRunning),
	)
	defer func() {
		pauseDuration := time.Since(pauseStartTime)
		// Transition to Paused state
		if err := exec.stateMachine.Transition(TransitionPauseComplete); err != nil {
			logutil.Warn(
				"cdc.frontend.task.transition_paused_failed",
				zap.Error(err),
			)
		}

		// Metrics: task paused
		if wasRunning {
			v2.CdcTaskTotalGauge.WithLabelValues("running").Dec()
			v2.CdcTaskTotalGauge.WithLabelValues("paused").Inc()
			v2.CdcTaskStateChangeCounter.WithLabelValues("running", "paused").Inc()
		}

		logutil.Info(
			"cdc.frontend.task.pause_success",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("task-name", exec.spec.TaskName),
			zap.String("state", exec.stateMachine.State().String()),
			zap.Duration("pause-duration", pauseDuration),
		)
	}()

	if wasRunning {
		cdc.GetTableDetector(exec.cnUUID).UnRegister(exec.spec.TaskId)
		exec.activeRoutine.ClosePause()

		// Synchronously wait for all readers to stop before proceeding
		// This ensures no goroutine leaks and clean pause state
		exec.stopAllReaders()

		// Note: task was marked as paused earlier (before ClosePause) to maximize blocking window
		// This may cause some watermark updates during stopAllReaders to be blocked,
		// leading to minor data duplication on resume, but prevents data loss

		// FIX: Force flush watermarks with timeout
		// This ensures all legitimate watermarks (from commits completed before pause)
		// are persisted to database before marking pause as complete
		// Without this, watermarks in cacheUncommitted would be lost, causing data duplication on resume
		if exec.watermarkUpdater != nil {
			flushCtx, cancel := context.WithTimeout(
				defines.AttachAccountId(context.Background(), uint32(exec.spec.Accounts[0].GetId())),
				30*time.Second, // 30s timeout to prevent hanging
			)
			defer cancel()

			if err := exec.watermarkUpdater.ForceFlush(flushCtx); err != nil {
				logutil.Error(
					"cdc.frontend.task.pause_force_flush_failed",
					zap.String("task-id", exec.spec.TaskId),
					zap.Error(err),
				)
				// Return error to ensure data consistency
				// Pause failure is acceptable, data inconsistency is not
				return moerr.NewInternalErrorf(context.Background(),
					"pause failed: unable to flush watermarks: %v", err)
			}

			logutil.Info(
				"cdc.frontend.task.pause_watermark_flushed",
				zap.String("task-id", exec.spec.TaskId),
			)
		}

		// Log watermark states after all readers stopped and watermarks flushed
		exec.logCurrentWatermarks("after_pause")

		// let Start() go
		select {
		case exec.holdCh <- 1:
			// Signal sent successfully
		default:
			// Channel full or Start() already exited, ignore
		}
	}
	return nil
}

// Cancel cdc task
func (exec *CDCTaskExecutor) Cancel() error {
	// Check if running before state transition
	wasRunning := exec.stateMachine.IsRunning()

	// Transition to Cancelling state
	if err := exec.stateMachine.Transition(TransitionCancel); err != nil {
		return moerr.NewInternalErrorf(context.Background(), "cannot cancel: %v", err)
	}

	// FIX: Unmark task as paused to prevent pausedTasks leakage
	// If task was paused before cancel, we need to clean up the pause mark
	if exec.watermarkUpdater != nil {
		exec.watermarkUpdater.UnmarkTaskPaused(exec.spec.TaskId)
	}

	logutil.Info(
		"cdc.frontend.task.cancel_start",
		zap.String("task-id", exec.spec.TaskId),
		zap.String("task-name", exec.spec.TaskName),
		zap.String("state", exec.stateMachine.State().String()),
		zap.Bool("was-running", wasRunning),
	)
	defer func() {
		// Transition to Cancelled state
		if err := exec.stateMachine.Transition(TransitionCancelComplete); err != nil {
			logutil.Warn(
				"cdc.frontend.task.transition_cancelled_failed",
				zap.Error(err),
			)
		}

		// Metrics: task cancelled
		if wasRunning {
			v2.CdcTaskTotalGauge.WithLabelValues("running").Dec()
			v2.CdcTaskStateChangeCounter.WithLabelValues("running", "cancelled").Inc()
		}

		logutil.Info(
			"cdc.frontend.task.cancel_success",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("task-name", exec.spec.TaskName),
			zap.String("state", exec.stateMachine.State().String()),
		)
	}()

	if wasRunning {
		cdc.GetTableDetector(exec.cnUUID).UnRegister(exec.spec.TaskId)
		exec.activeRoutine.CloseCancel()

		// Synchronously wait for all readers to stop before proceeding
		// This ensures no goroutine leaks and no interference with new tasks
		exec.stopAllReaders()

		// let Start() go
		select {
		case exec.holdCh <- 1:
			// Signal sent successfully
		default:
			// Channel full or Start() already exited, ignore
		}
	}
	return nil
}

// logCurrentWatermarks logs current watermarks for all tables in this task
func (exec *CDCTaskExecutor) logCurrentWatermarks(phase string) {
	if exec.watermarkUpdater == nil {
		return
	}

	accountId := uint64(exec.spec.Accounts[0].GetId())
	taskId := exec.spec.TaskId
	ctx := defines.AttachAccountId(context.Background(), catalog.System_Account)

	// Query current watermarks from database
	sql := cdc.CDCSQLBuilder.GetTaskWatermarksSQL(accountId, taskId)
	res := exec.ie.Query(ctx, sql, ie.SessionOverrideOptions{})
	if res.Error() != nil {
		logutil.Warn(
			"cdc.frontend.task.log_watermarks_failed",
			zap.String("task-id", taskId),
			zap.String("phase", phase),
			zap.Error(res.Error()),
		)
		return
	}

	// Log each table's watermark
	for i := uint64(0); i < res.RowCount(); i++ {
		dbName, _ := res.GetString(ctx, i, 0)
		tableName, _ := res.GetString(ctx, i, 1)
		watermarkStr, _ := res.GetString(ctx, i, 2)

		logutil.Info(
			"cdc.frontend.task.watermark_snapshot",
			zap.String("task-id", taskId),
			zap.String("phase", phase),
			zap.String("db", dbName),
			zap.String("table", tableName),
			zap.String("watermark", watermarkStr),
		)
	}
}

// stopAllReaders stops all running readers and waits for them to exit
// This method ensures complete cleanup before Cancel/Pause returns
func (exec *CDCTaskExecutor) stopAllReaders() {
	if exec.runningReaders == nil {
		return
	}

	logutil.Info(
		"cdc.frontend.task.stop_all_readers_start",
		zap.String("task-id", exec.spec.TaskId),
	)

	// Step 1: Send stop signal to all readers
	readerCount := 0
	exec.runningReaders.Range(func(key, value interface{}) bool {
		reader := value.(cdc.ChangeReader)
		tableKey, _ := key.(string)
		closeStart := time.Now()
		logutil.Debug(
			"cdc.frontend.task.stop_reader_close_start",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("table", tableKey),
		)
		reader.Close()
		logutil.Debug(
			"cdc.frontend.task.stop_reader_close_done",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("table", tableKey),
			zap.Duration("cost", time.Since(closeStart)),
		)
		readerCount++
		return true
	})

	// Step 2: Wait for all readers to completely exit
	exec.runningReaders.Range(func(key, value interface{}) bool {
		reader := value.(cdc.ChangeReader)
		tableKey, _ := key.(string)
		waitStart := time.Now()
		logutil.Debug(
			"cdc.frontend.task.stop_reader_wait_start",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("table", tableKey),
		)
		done := make(chan struct{})
		go func() {
			reader.Wait()
			close(done)
		}()
		select {
		case <-done:
			logutil.Debug(
				"cdc.frontend.task.stop_reader_wait_done",
				zap.String("task-id", exec.spec.TaskId),
				zap.String("table", tableKey),
				zap.Duration("cost", time.Since(waitStart)),
			)
		case <-time.After(10 * time.Second):
			logutil.Warn(
				"cdc.frontend.task.stop_reader_wait_timeout",
				zap.String("task-id", exec.spec.TaskId),
				zap.String("table", tableKey),
				zap.Duration("waited", time.Since(waitStart)),
			)
		}
		return true
	})

	// Step 3: Clear the map
	exec.runningReaders.Range(func(key, value interface{}) bool {
		exec.runningReaders.Delete(key)
		return true
	})

	logutil.Debug(
		"cdc.frontend.task.stop_all_readers_complete",
		zap.String("task-id", exec.spec.TaskId),
		zap.Int("reader-count", readerCount),
	)
}

// initAesKeyByInternalExecutor lazily initializes the global cdc.AesKey by
// reading the account's encrypted data key from the database and decoding it
// with the CN's key-encryption key. A no-op when the key is already set.
func (exec *CDCTaskExecutor) initAesKeyByInternalExecutor(ctx context.Context, accountId uint32) (err error) {
	// Key already initialized — nothing to do.
	if len(cdc.AesKey) > 0 {
		return nil
	}

	sql := cdc.CDCSQLBuilder.GetDataKeySQL(uint64(accountId), cdc.InitKeyId)
	result := exec.ie.Query(ctx, sql, ie.SessionOverrideOptions{})
	switch {
	case result.Error() != nil:
		return result.Error()
	case result.RowCount() < 1:
		return moerr.NewInternalErrorf(ctx, "no data key record for account %d", accountId)
	}

	var encrypted string
	if encrypted, err = result.GetString(ctx, 0, 0); err != nil {
		return err
	}

	// Decrypt with the CN-local key-encryption key; on success this sets the
	// package-wide AES key.
	cdc.AesKey, err = cdc.AesCFBDecodeWithKey(
		ctx, encrypted,
		[]byte(getGlobalPuWrapper(exec.cnUUID).SV.KeyEncryptionKey),
	)
	return
}

// updateErrMsg persists the task state and error message: an empty errMsg
// marks the task Running (clearing any previous error), a non-empty one marks
// it Failed. Over-long messages are truncated to the column limit. The write
// runs under the system account.
func (exec *CDCTaskExecutor) updateErrMsg(ctx context.Context, errMsg string) (err error) {
	accountId := exec.spec.Accounts[0].GetId()

	// Empty message <=> healthy; anything else flips the state to Failed.
	taskState := cdc.CDCState_Running
	if errMsg != "" {
		taskState = cdc.CDCState_Failed
	}

	// Truncate to fit the err_msg column.
	if len(errMsg) > cdc.CDCWatermarkErrMsgMaxLen {
		errMsg = errMsg[:cdc.CDCWatermarkErrMsgMaxLen]
	}

	updateSQL := cdc.CDCSQLBuilder.UpdateTaskStateAndErrMsgSQL(
		uint64(accountId),
		exec.spec.TaskId,
		taskState,
		errMsg,
	)
	return exec.ie.Exec(
		defines.AttachAccountId(ctx, catalog.System_Account),
		updateSQL,
		ie.SessionOverrideOptions{},
	)
}

// clearAllTableErrors clears error messages for all tables in this task
// This is called during Resume to allow retrying tables that had non-retryable errors
// after user has fixed the underlying issues
// clearAllTableErrors clears error messages for all tables in this task
// This is called during Resume to allow retrying tables that had non-retryable errors
// after user has fixed the underlying issues
func (exec *CDCTaskExecutor) clearAllTableErrors(ctx context.Context) error {
	var (
		account = uint64(exec.spec.Accounts[0].GetId())
		taskId  = exec.spec.TaskId
	)

	// SQL is produced by the builder, so no manual escaping is needed here.
	clearSQL := cdc.CDCSQLBuilder.ClearTaskTableErrorsSQL(account, taskId)

	logutil.Info(
		"cdc.frontend.task.clear_table_errors",
		zap.String("task-id", taskId),
		zap.Uint64("account-id", account),
	)

	// The cleanup runs under the system account regardless of the caller.
	return exec.ie.Exec(
		defines.AttachAccountId(ctx, catalog.System_Account),
		clearSQL,
		ie.SessionOverrideOptions{},
	)
}

// handleNewTables is the TableDetector callback: given the current table map
// for all accounts, it creates an execution pipeline for every table of this
// task's account that matches the configured patterns and is not already
// running or error-blocked. Per-table failures are collected (and persisted
// via the watermark updater where possible) rather than aborting the loop; a
// summary error is returned at the end so the detector retries.
func (exec *CDCTaskExecutor) handleNewTables(allAccountTbls map[uint32]cdc.TblMap) error {
	// lock to avoid create pipelines for the same table
	// 2025.7, this lock might be needless now
	exec.Lock()
	defer exec.Unlock()

	// if injected, we expect nothing
	if sleepSeconds, injected := objectio.CDCHandleSlowInjected(); injected {
		time.Sleep(time.Duration(sleepSeconds) * time.Second)
	}

	accountId := uint32(exec.spec.Accounts[0].GetId())
	ctx := defines.AttachAccountId(context.Background(), accountId)

	// One transaction covers all pipeline setups in this invocation; the
	// deferred FinishTxnOp commits/rolls back based on the final err value.
	txnOp, err := cdc.GetTxnOp(ctx, exec.cnEngine, exec.cnTxnClient, "cdc-handleNewTables")
	if err != nil {
		logutil.Error(
			"cdc.frontend.task.handle_new_tables_get_txnop_failed",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("task-name", exec.spec.TaskName),
			zap.Error(err),
		)
		return err
	}
	defer func() {
		cdc.FinishTxnOp(ctx, err, txnOp, exec.cnEngine)
	}()
	err = exec.cnEngine.New(ctx, txnOp)

	// if injected, we expect the handleNewTables to keep retrying
	if objectio.CDCHandleErrInjected() {
		err = moerr.NewInternalError(context.Background(), "CDC_HANDLENEWTABLES_ERR")
	}

	if err != nil {
		logutil.Error(
			"cdc.frontend.task.handle_new_tables_new_engine_failed",
			zap.String("task-id", exec.spec.TaskId),
			zap.String("task-name", exec.spec.TaskName),
			zap.Error(err),
		)
		return err
	}

	// Track failed tables for better error reporting
	failedTables := make(map[string]error)
	successCount := 0

	for key, info := range allAccountTbls[accountId] {
		// already running
		if val, ok := exec.runningReaders.Load(key); ok {
			if reader, ok := val.(cdc.ChangeReader); ok {
				readerInfo := reader.GetTableInfo()
				// wait the old reader to stop
				// A table whose only difference is the table id was dropped and
				// recreated: let the old reader drain, then rebuild below.
				if info.OnlyDiffinTblId(readerInfo) {
					logutil.Info(
						"cdc.frontend.task.wait_old_reader",
						zap.String("table", key),
						zap.Uint64("old-table-id", readerInfo.SourceTblId),
						zap.Uint64("new-table-id", info.SourceTblId),
					)
					waitChan := make(chan struct{})
					go func() {
						defer close(waitChan)
						reader.Wait()
					}()
					<-waitChan
				} else {
					continue
				}
			}
		}

		// Skip tables matched by the exclusion regex.
		if exec.exclude != nil && exec.exclude.MatchString(key) {
			continue
		}

		// matchAnyPattern also fills in the sink db/table names on the clone.
		newTableInfo := info.Clone()
		if !exec.matchAnyPattern(key, newTableInfo) {
			continue
		}
		// Skip tables with a persisted, non-recoverable error.
		hasError, err := GetTableErrMsg(ctx, accountId, exec.ie, exec.spec.TaskId, newTableInfo)
		if err != nil {
			logutil.Error(
				"cdc.frontend.task.get_table_errmsg_failed",
				zap.String("task-name", exec.spec.TaskName),
				zap.String("table", key),
				zap.Error(err),
			)
			// Don't return immediately - try other tables
			failedTables[key] = err
			continue
		}
		if hasError {
			continue
		}

		logutil.Info(
			"cdc.frontend.task.new_table_detected",
			zap.String("task-name", exec.spec.TaskName),
			zap.String("table", key),
			zap.String("source-db", newTableInfo.SourceDbName),
			zap.String("source-table", newTableInfo.SourceTblName),
		)
		if err = exec.addExecPipelineForTable(ctx, newTableInfo, txnOp); err != nil {
			logutil.Error(
				"cdc.frontend.task.add_exec_pipeline_failed",
				zap.String("task-name", exec.spec.TaskName),
				zap.String("table", key),
				zap.Error(err),
			)
			// Persist error to database for this table
			if exec.watermarkUpdater != nil {
				watermarkKey := cdc.WatermarkKey{
					AccountId: uint64(exec.spec.Accounts[0].GetId()),
					TaskId:    exec.spec.TaskId,
					DBName:    newTableInfo.SourceDbName,
					TableName: newTableInfo.SourceTblName,
				}
				errorCtx := &cdc.ErrorContext{
					IsRetryable: false, // Pipeline creation errors are not retryable by default
				}
				if updateErr := exec.watermarkUpdater.UpdateWatermarkErrMsg(ctx, &watermarkKey, err.Error(), errorCtx); updateErr != nil {
					logutil.Warn(
						"cdc.frontend.task.persist_table_error_failed",
						zap.String("table", key),
						zap.Error(updateErr),
					)
				}
			}
			// Don't return immediately - try other tables
			failedTables[key] = err
			continue
		}

		info.IdChanged = newTableInfo.IdChanged
		successCount++
		logutil.Info(
			"cdc.frontend.task.add_exec_pipeline_success",
			zap.String("task-name", exec.spec.TaskName),
			zap.String("table", key),
		)
	}

	// Log summary
	if len(failedTables) > 0 {
		failedKeys := make([]string, 0, len(failedTables))
		for k := range failedTables {
			failedKeys = append(failedKeys, k)
		}
		logutil.Warn(
			"cdc.frontend.task.add_exec_pipeline_summary",
			zap.String("task-name", exec.spec.TaskName),
			zap.Int("success-count", successCount),
			zap.Int("failed-count", len(failedTables)),
			zap.Strings("failed-tables", failedKeys),
		)
		// Return error to trigger retry by TableDetector
		return moerr.NewInternalErrorf(ctx, "failed to add pipeline for %d tables", len(failedTables))
	}

	return nil
}

// GetTableErrMsg reports whether a table should be skipped because of a
// persisted error that cannot (or should not yet) be retried. It reads the
// table's err_msg under the system account, parses the embedded error
// metadata, and applies the unified retry policy: retryable-and-under-limit
// or expired non-retryable errors yield hasError=false (table may proceed);
// max-retry-exceeded or fresh non-retryable errors yield hasError=true.
// Declared as a var so unit tests can substitute it.
var GetTableErrMsg = func(
	ctx context.Context,
	accountId uint32,
	ieExecutor ie.InternalExecutor,
	taskId string,
	tbl *cdc.DbTableInfo) (
	hasError bool, err error,
) {
	ctx = defines.AttachAccountId(ctx, catalog.System_Account)
	sql := cdc.CDCSQLBuilder.GetTableErrMsgSQL(uint64(accountId), taskId, tbl.SourceDbName, tbl.SourceTblName)
	res := ieExecutor.Query(ctx, sql, ie.SessionOverrideOptions{})
	if res.Error() != nil {
		return false, res.Error()
	} else if res.RowCount() < 1 {
		// No record at all: nothing blocks this table.
		return false, nil
	}

	errMsg, err := res.GetString(ctx, 0, 0)
	if err != nil {
		return false, err
	}
	if errMsg == "" {
		return false, nil
	}

	// Parse error metadata using unified parser
	// Unparseable messages are treated as non-blocking.
	metadata := cdc.ParseErrorMetadata(errMsg)
	if metadata == nil {
		return false, nil
	}

	// Use unified retry logic
	if cdc.ShouldRetry(metadata) {
		// Log detailed retry information
		if metadata.IsRetryable {
			logutil.Info(
				"cdc.frontend.task.retryable_table_error",
				zap.String("db", tbl.SourceDbName),
				zap.String("table", tbl.SourceTblName),
				zap.Int("retry-count", metadata.RetryCount),
				zap.Int("max-retry", cdc.MaxRetryCount),
				zap.String("message", metadata.Message),
			)
		} else {
			// Expired non-retryable error
			age := time.Since(metadata.FirstSeen)
			logutil.Info(
				"cdc.frontend.task.expired_non_retryable_error",
				zap.String("db", tbl.SourceDbName),
				zap.String("table", tbl.SourceTblName),
				zap.Duration("age", age),
				zap.String("message", metadata.Message),
			)
		}
		return false, nil
	}

	// Cannot retry
	if metadata.IsRetryable {
		// Exceeded max retry count
		logutil.Warn(
			"cdc.frontend.task.max_retry_exceeded",
			zap.String("db", tbl.SourceDbName),
			zap.String("table", tbl.SourceTblName),
			zap.Int("retry-count", metadata.RetryCount),
			zap.String("message", metadata.Message),
		)
	} else {
		// Fresh non-retryable error
		age := time.Since(metadata.FirstSeen)
		logutil.Info(
			"cdc.frontend.task.permanent_table_error",
			zap.String("db", tbl.SourceDbName),
			zap.String("table", tbl.SourceTblName),
			zap.Duration("age", age),
			zap.String("message", metadata.Message),
		)
	}

	hasError = true
	return
}

// matchAnyPattern reports whether the db.table encoded in key is covered by
// any configured table pattern. On a match it also completes info's sink-side
// names: a wildcard sink database/table falls back to the source name.
func (exec *CDCTaskExecutor) matchAnyPattern(key string, info *cdc.DbTableInfo) bool {
	// matches reports whether pattern p accepts value s; the "all"
	// granularity wildcard accepts anything.
	matches := func(s, p string) bool {
		return p == cdc.CDCPitrGranularity_All || s == p
	}

	dbName, tblName := cdc.SplitDbTblKey(key)
	for _, pattern := range exec.tables.Pts {
		if !matches(dbName, pattern.Source.Database) || !matches(tblName, pattern.Source.Table) {
			continue
		}
		// Complete the sink info from the matching pattern.
		info.SinkDbName = pattern.Sink.Database
		if info.SinkDbName == cdc.CDCPitrGranularity_All {
			info.SinkDbName = dbName
		}
		info.SinkTblName = pattern.Sink.Table
		if info.SinkTblName == cdc.CDCPitrGranularity_All {
			info.SinkTblName = tblName
		}
		return true
	}
	return false
}

// reader ----> sinker ----> remote db
//
// addExecPipelineForTable wires up the sync pipeline for a single source
// table: it resolves the starting watermark, fetches the table definition at
// the transaction's snapshot, constructs a sinker and a change-stream reader,
// and launches both as goroutines controlled by exec.activeRoutine.
// Returns an error if any construction step fails; on success the pipeline
// goroutines continue running after this function returns.
func (exec *CDCTaskExecutor) addExecPipelineForTable(
	ctx context.Context,
	info *cdc.DbTableInfo,
	txnOp client.TxnOperator,
) (err error) {
	// for ut: fault-injection hooks used only by unit tests.
	if objectio.CDCAddExecConsumeTruncateInjected() {
		info.IdChanged = false
		return nil
	}

	if objectio.CDCAddExecErrInjected() {
		return moerr.NewInternalErrorNoCtx("CDC_AddExecPipelineForTable_ERR")
	}

	// step 1. init watermarkUpdater
	// get watermark from db
	// Default watermark is the task's configured start timestamp; in noFull
	// mode we skip the full snapshot and start from the txn's snapshot TS.
	watermark := exec.startTs
	if exec.noFull {
		watermark = types.TimestampToTS(txnOp.SnapshotTS())
	}
	watermarkKey := cdc.WatermarkKey{
		AccountId: uint64(exec.spec.Accounts[0].GetId()),
		TaskId:    exec.spec.TaskId,
		DBName:    info.SourceDbName,
		TableName: info.SourceTblName,
	}
	// Use a previously committed watermark if one exists; otherwise persist
	// and use the default computed above.
	if watermark, err = exec.watermarkUpdater.GetOrAddCommitted(
		ctx,
		&watermarkKey,
		&watermark,
	); err != nil {
		return err
	}

	// Note: Do NOT clear err_msg here
	// Error should only be cleared when reader successfully syncs data (lazy, eventual consistency)
	// This allows retry count to accumulate properly (1→2→3→4)
	// If cleared here, retry count would reset on every rebuild, making max retry limit ineffective

	tableDef, err := cdc.GetTableDef(ctx, txnOp, exec.cnEngine, info.SourceTblId)
	if err != nil {
		return
	}

	// step 2. new sinker
	// NOTE(review): the additionalConfig type assertions below assume the
	// values were produced by JSON decoding (numbers as float64) — they
	// panic if the config shape differs; confirm upstream validation.
	sinker, err := cdc.NewSinker(
		exec.sinkUri,
		uint64(exec.spec.Accounts[0].GetId()),
		exec.spec.TaskId,
		info,
		exec.watermarkUpdater,
		tableDef,
		cdc.CDCDefaultRetryTimes,
		cdc.CDCDefaultRetryDuration,
		exec.activeRoutine,
		uint64(exec.additionalConfig[cdc.CDCTaskExtraOptions_MaxSqlLength].(float64)),
		exec.additionalConfig[cdc.CDCTaskExtraOptions_SendSqlTimeout].(string),
	)
	if err != nil {
		return err
	}

	// step 3. new reader (using V2 tableChangeStream)
	frequencyStr := exec.additionalConfig[cdc.CDCTaskExtraOptions_Frequency].(string)
	frequency := cdc.ParseFrequencyToDuration(frequencyStr)
	reader := cdc.NewTableChangeStream(
		exec.cnTxnClient,
		exec.cnEngine,
		exec.mp,
		exec.packerPool,
		uint64(exec.spec.Accounts[0].GetId()),
		exec.spec.TaskId,
		info,
		sinker,
		exec.watermarkUpdater,
		tableDef,
		exec.additionalConfig[cdc.CDCTaskExtraOptions_InitSnapshotSplitTxn].(bool),
		exec.runningReaders,
		exec.startTs,
		exec.endTs,
		exec.noFull,
		frequency,
	)

	// step 4. start goroutines (sinker first, then reader)
	// Note: Reader will register itself in runningReaders during Run()
	// to prevent duplicate readers (see TableChangeStream.Run line 287)
	go sinker.Run(ctx, exec.activeRoutine)
	go reader.Run(ctx, exec.activeRoutine)

	return
}

// retrieveCdcTask loads this executor's task definition from the CDC task
// metadata table and populates exec's fields (sinkUri, tables, exclude,
// startTs/endTs, noFull, additionalConfig). Exactly one row must match the
// (account, task id) pair; zero or multiple rows is an error.
//
// Column indices below depend on the column order produced by
// cdc.CDCSQLBuilder.GetTaskSQL — presumably:
// 0=sink_uri(json), 1=sink_type, 2=sink_password, 3=tables(json),
// 4=exclude(regexp), 5=start_ts, 6=end_ts, 7=no_full, 8=additional_config.
// TODO confirm against the SQL builder.
func (exec *CDCTaskExecutor) retrieveCdcTask(ctx context.Context) error {
	// Task metadata lives under the system account.
	ctx = defines.AttachAccountId(ctx, catalog.System_Account)

	accId := exec.spec.Accounts[0].GetId()
	sql := cdc.CDCSQLBuilder.GetTaskSQL(accId, exec.spec.TaskId)
	res := exec.ie.Query(ctx, sql, ie.SessionOverrideOptions{})
	if res.Error() != nil {
		return res.Error()
	}

	if res.RowCount() < 1 {
		return moerr.NewInternalErrorf(ctx, "none cdc task for %d %s", accId, exec.spec.TaskId)
	} else if res.RowCount() > 1 {
		return moerr.NewInternalErrorf(ctx, "duplicate cdc task for %d %s", accId, exec.spec.TaskId)
	}

	//sink_type
	sinkTyp, err := res.GetString(ctx, 0, 1)
	if err != nil {
		return err
	}

	// Console sinks have no URI/password; skip decoding for them.
	if sinkTyp != cdc.CDCSinkType_Console {
		//sink uri
		jsonSinkUri, err := res.GetString(ctx, 0, 0)
		if err != nil {
			return err
		}

		if err = cdc.JsonDecode(jsonSinkUri, &exec.sinkUri); err != nil {
			return err
		}

		//sink_password
		sinkPwd, err := res.GetString(ctx, 0, 2)
		if err != nil {
			return err
		}

		// TODO replace with creatorAccountId
		// The AES key must be loaded before the stored password can be
		// decrypted.
		if err = exec.initAesKeyByInternalExecutor(ctx, catalog.System_Account); err != nil {
			return err
		}

		if exec.sinkUri.Password, err = cdc.AesCFBDecode(ctx, sinkPwd); err != nil {
			return err
		}
	}

	//update sink type after deserialize
	exec.sinkUri.SinkTyp = sinkTyp

	// tables
	jsonTables, err := res.GetString(ctx, 0, 3)
	if err != nil {
		return err
	}

	if err = cdc.JsonDecode(jsonTables, &exec.tables); err != nil {
		return err
	}

	// exclude
	// Empty string means no exclusion filter; exec.exclude stays nil.
	exclude, err := res.GetString(ctx, 0, 4)
	if err != nil {
		return err
	}
	if exclude != "" {
		if exec.exclude, err = regexp.Compile(exclude); err != nil {
			return err
		}
	}

	// startTs
	startTs, err := res.GetString(ctx, 0, 5)
	if err != nil {
		return err
	}
	if exec.startTs, err = CDCStrToTS(startTs); err != nil {
		return err
	}
	// endTs
	endTs, err := res.GetString(ctx, 0, 6)
	if err != nil {
		return err
	}
	if exec.endTs, err = CDCStrToTS(endTs); err != nil {
		return err
	}

	// noFull
	noFull, err := res.GetString(ctx, 0, 7)
	if err != nil {
		return err
	}
	// Parse error deliberately ignored: any unparsable value defaults
	// noFull to false (i.e. perform the full snapshot).
	exec.noFull, _ = strconv.ParseBool(noFull)

	// additionalConfig
	additionalConfigStr, err := res.GetString(ctx, 0, 8)
	if err != nil {
		return err
	}
	return json.Unmarshal([]byte(additionalConfigStr), &exec.additionalConfig)
}
