package mongodb

import (
	"context"
	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	"github.com/pingcap/tiflow/cdc/model"
	"github.com/pingcap/tiflow/cdc/sink/dmlsink"
	"github.com/pingcap/tiflow/cdc/sink/metrics"
	"github.com/pingcap/tiflow/cdc/sink/tablesink/state"
	"github.com/pingcap/tiflow/pkg/chann"
	"github.com/pingcap/tiflow/pkg/config"
	"github.com/pingcap/tiflow/pkg/sink/codec"
	"github.com/pingcap/tiflow/pkg/sink/mongodb"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
	"time"
)

const (
	// batchSize is the maximum number of messages collected into one batch.
	batchSize = 2048
	// batchInterval is the interval a worker waits while collecting a batch
	// of messages. It should not be too large, otherwise it causes high latency.
	batchInterval = 15 * time.Millisecond
)

// msgEvent pairs a row change event with the topic/partition key it should
// be routed by when it is handed to the encoder group.
type msgEvent struct {
	// key routes the event; under MongoDB the partition key is treated as a
	// table identifier (see saveMessageData).
	key      model.TopicPartitionKey
	// rowEvent is the row change event together with its completion callback.
	rowEvent *dmlsink.RowChangeCallbackableEvent
}

// worker consumes row change events from msgChan, encodes them through
// encoderGroup, and writes the encoded messages to the MongoDB sink.
type worker struct {
	// changeFeedID identifies the changefeed this worker serves; used in logs
	// and metric labels.
	changeFeedID model.ChangeFeedID
	// protocol selects between batch and non-batch encoding (see run).
	protocol     config.Protocol
	// msgChan is the inbound event channel; drained and closed via close().
	msgChan      *chann.DrainableChann[msgEvent]
	// ticker bounds how long batch() waits for a full batch.
	ticker       *time.Ticker
	encoderGroup codec.EncoderGroup

	// NOTE(review): these observers are never assigned in newWorker (the
	// wiring is commented out there), so they are nil — confirm before use.
	metricMongodbWorkerSendMessageDuration prometheus.Observer
	metricMongodbWorkerBatchSize           prometheus.Observer
	metricMongodbWorkerBatchDuration       prometheus.Observer
	// statistics is used to record DML metrics.
	statistics *metrics.Statistics
}

// newWorker builds a worker for the given changefeed that encodes row
// change events via encoderGroup and records DML metrics in statistics.
func newWorker(
	id model.ChangeFeedID,
	protocol config.Protocol,
	encoderGroup codec.EncoderGroup,
	statistics *metrics.Statistics,
) *worker {
	return &worker{
		changeFeedID: id,
		protocol:     protocol,
		msgChan:      chann.NewAutoDrainChann[msgEvent](),
		ticker:       time.NewTicker(batchInterval),
		encoderGroup: encoderGroup,
		// TODO: wire up the metricMongodbWorker* observers once mongodb
		// worker metrics are registered, analogous to the MQ sink:
		//   mq.WorkerSendMessageDuration.WithLabelValues(id.Namespace, id.ID)
		//   mq.WorkerBatchSize.WithLabelValues(id.Namespace, id.ID)
		//   mq.WorkerBatchDuration.WithLabelValues(id.Namespace, id.ID)
		statistics: statistics,
	}
}

func (w *worker) run(ctx context.Context) (retErr error) {
	defer func() {
		w.ticker.Stop()
		log.Info("Mongodb sink worker exited", zap.Error(retErr),
			zap.String("namespace", w.changeFeedID.Namespace),
			zap.String("changefeed", w.changeFeedID.ID),
			zap.String("protocol", w.protocol.String()),
		)
	}()

	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error {
		return w.encoderGroup.Run(ctx)
	})
	g.Go(func() error {
		if w.protocol.IsBatchEncode() {
			return w.batchEncodeRun(ctx)
		}
		return w.nonBatchEncodeRun(ctx)
	})
	g.Go(func() error {
		return w.saveMessageData(ctx)
	})
	return g.Wait()
}

// 立即将事件添加到编码器组。
func (w *worker) nonBatchEncodeRun(ctx context.Context) error {
	log.Info("Mongodb sink non batch worker started",
		zap.String("namespace", w.changeFeedID.Namespace),
		zap.String("changefeed", w.changeFeedID.ID),
		zap.String("protocol", w.protocol.String()),
	)

	for {
		select {
		case <-ctx.Done():
			return errors.Trace(ctx.Err())
		case event, ok := <-w.msgChan.Out():
			if !ok {
				log.Warn("Mongodb sink flush worker channel closed",
					zap.String("namespace", w.changeFeedID.Namespace),
					zap.String("changefeed", w.changeFeedID.ID))
				return nil
			}
			if event.rowEvent.GetTableSinkState() != state.TableSinkSinking {
				event.rowEvent.Callback()
				log.Debug("Skip event of stopped table",
					zap.String("namespace", w.changeFeedID.Namespace),
					zap.String("changefeed", w.changeFeedID.ID),
					zap.Any("event", event))
				continue
			}
			if err := w.encoderGroup.AddEvents(
				ctx,
				event.key,
				event.rowEvent); err != nil {
				return errors.Trace(err)
			}
		}
	}

}

// 将消息批量收集并添加到编码器组中。
func (w *worker) batchEncodeRun(ctx context.Context) (retErr error) {
	log.Info("Mongodb sink batch worker started",
		zap.String("namespace", w.changeFeedID.Namespace),
		zap.String("changefeed", w.changeFeedID.ID),
		zap.String("protocol", w.protocol.String()),
	)

	msgsBuf := make([]msgEvent, batchSize)
	for {
		//start := time.Now()
		msgCount, err := w.batch(ctx, msgsBuf, batchInterval)
		if err != nil {
			return errors.Trace(err)
		}
		if msgCount == 0 {
			continue
		}

		//w.metricMQWorkerBatchSize.Observe(float64(msgCount))
		//w.metricMQWorkerBatchDuration.Observe(time.Since(start).Seconds())

		msgs := msgsBuf[:msgCount]
		// 在将消息添加到编码器组之前，按其TopicPartitionKey对消息进行分组。
		groupedMsgs := w.group(msgs)
		for key, msg := range groupedMsgs {
			if err := w.encoderGroup.AddEvents(ctx, key, msg...); err != nil {
				return errors.Trace(err)
			}
		}
	}
}

// 从w.msgChan收集一批消息到缓冲区，返回收集到的消息数。
// 注意:它将阻塞，直到至少一个消息被接收。
// batch collects a batch of messages from w.msgChan into buffer and
// returns the number of messages collected.
// Note: it blocks until at least one message is received, then keeps
// draining until the buffer is full or flushInterval elapses.
func (w *worker) batch(
	ctx context.Context, buffer []msgEvent, flushInterval time.Duration,
) (int, error) {
	msgCount := 0
	maxBatchSize := len(buffer)

	// Block for the first message; without this the caller would busy-spin
	// when the channel is idle.
	select {
	case <-ctx.Done():
		return msgCount, ctx.Err()
	case msg, ok := <-w.msgChan.Out():
		if !ok {
			// Fixed log message: previously said "MQ sink", inconsistent
			// with the rest of this file.
			log.Warn("Mongodb sink flush worker channel closed")
			return msgCount, nil
		}
		if msg.rowEvent != nil {
			w.statistics.ObserveRows(msg.rowEvent.Event)
			buffer[msgCount] = msg
			msgCount++
		}
	}

	// Reset the timer to start a new batch window; the batch is cut when
	// the interval elapses, even if the buffer is not full.
	w.ticker.Reset(flushInterval)
	for {
		select {
		case <-ctx.Done():
			return msgCount, ctx.Err()
		case msg, ok := <-w.msgChan.Out():
			if !ok {
				log.Warn("Mongodb sink flush worker channel closed")
				return msgCount, nil
			}

			if msg.rowEvent != nil {
				w.statistics.ObserveRows(msg.rowEvent.Event)
				buffer[msgCount] = msg
				msgCount++
			}

			if msgCount >= maxBatchSize {
				return msgCount, nil
			}
		case <-w.ticker.C:
			return msgCount, nil
		}
	}
}

// 分组按消息的键对消息进行分组。
func (w *worker) group(
	msgs []msgEvent,
) map[model.TopicPartitionKey][]*dmlsink.RowChangeCallbackableEvent {
	groupedMsgs := make(map[model.TopicPartitionKey][]*dmlsink.RowChangeCallbackableEvent)
	for _, msg := range msgs {
		// 当表停止时跳过此事件
		if msg.rowEvent.GetTableSinkState() != state.TableSinkSinking {
			msg.rowEvent.Callback()
			log.Debug("Skip event of stopped table", zap.Any("event", msg.rowEvent))
			continue
		}
		if _, ok := groupedMsgs[msg.key]; !ok {
			groupedMsgs[msg.key] = make([]*dmlsink.RowChangeCallbackableEvent, 0)
		}
		groupedMsgs[msg.key] = append(groupedMsgs[msg.key], msg.rowEvent)
	}
	return groupedMsgs
}

// saveMessageData drains the encoder group's output: it waits for each
// encoding future to be ready, converts every encoded message into a
// mongodb.MongoMessage, and records batch-execution statistics. It also
// periodically exports the output channel backlog as a gauge.
// NOTE(review): the actual MongoDB write is still commented out below, so
// currently the encoded message is only logged — confirm before relying
// on this path to persist data.
func (w *worker) saveMessageData(ctx context.Context) error {
	ticker := time.NewTicker(15 * time.Second)
	metric := codec.EncoderGroupOutputChanSizeGauge.
		WithLabelValues(w.changeFeedID.Namespace, w.changeFeedID.ID)
	defer func() {
		ticker.Stop()
		codec.EncoderGroupOutputChanSizeGauge.
			DeleteLabelValues(w.changeFeedID.Namespace, w.changeFeedID.ID)
	}()

	var err error
	outCh := w.encoderGroup.Output()
	for {
		select {
		case <-ctx.Done():
			return errors.Trace(ctx.Err())
		case <-ticker.C:
			// Report how many encoded batches are waiting to be consumed.
			metric.Set(float64(len(outCh)))
		case future, ok := <-outCh:
			if !ok {
				log.Warn("Mongodb sink encoder's output channel closed",
					zap.String("namespace", w.changeFeedID.Namespace),
					zap.String("changefeed", w.changeFeedID.ID))
				return nil
			}
			// Block until the asynchronous encoding of this batch finishes.
			if err = future.Ready(ctx); err != nil {
				return errors.Trace(err)
			}
			for _, message := range future.Messages {
				//start := time.Now()
				if err = w.statistics.RecordBatchExecution(func() (int, int64, error) {
					// Route the message by partition; under MongoDB the
					// PartitionKey can be treated as a table name.
					message.SetPartitionKey(future.Key.PartitionKey)
					//if err := w.producer.AsyncSaveMessage(
					//	ctx,
					//	future.Key.Topic,
					//	future.Key.Partition,
					//	message); err != nil {
					//	return 0, 0, err
					//}

					data := &mongodb.MongoMessage{
						Payload: message.Value,
						Schema:  message.GetSchema(),
						Table:   message.GetTable(),
						Ts:      message.Ts,
					}

					log.Info("Mongodb Sink Encoded Message:", zap.Any("data", data))

					return message.GetRowsCount(), int64(message.Length()), nil
				}); err != nil {
					return err
				}

				//w.metricMongodbWorkerSendMessageDuration.Observe(time.Since(start).Seconds())
			}
		}
	}
}

// close shuts down the worker's inbound channel, draining any events that
// are still queued.
func (w *worker) close() {
	w.msgChan.CloseAndDrain()
	// TODO: should the mongodb client connection be closed here as well?
	//w.producer.Close()
}
