package mongodb

import (
	"context"
	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	"github.com/pingcap/tiflow/cdc/model"
	"github.com/pingcap/tiflow/cdc/sink/dmlsink"
	"github.com/pingcap/tiflow/cdc/sink/dmlsink/mq/transformer"
	"github.com/pingcap/tiflow/cdc/sink/dmlsink/mq/transformer/columnselector"
	"github.com/pingcap/tiflow/cdc/sink/metrics"
	"github.com/pingcap/tiflow/cdc/sink/tablesink/state"
	"github.com/pingcap/tiflow/cdc/sink/util"
	"github.com/pingcap/tiflow/pkg/config"
	"github.com/pingcap/tiflow/pkg/sink"
	"github.com/pingcap/tiflow/pkg/sink/codec"
	"github.com/pingcap/tiflow/pkg/sink/codec/builder"
	"go.uber.org/atomic"
	"go.uber.org/zap"
	"net/url"
	"sync"
)

// Assert that dmlSink implements the txn-level EventSink interface.
// var _ dmlsink.EventSink[*model.RowChangedEvent] = (*DMLSink)(nil)
var _ dmlsink.EventSink[*model.SingleTableTxn] = (*dmlSink)(nil)

// dmlSink is the mongodb DML sink. It applies a column-selector transformer
// to each row changed event and forwards the rows to a background worker,
// which encodes and delivers them.
// NOTE(review): the previous comment said "black hole sink" — a copy-paste
// leftover from the blackhole sink; this sink clearly feeds a worker.
type dmlSink struct {
	// id identifies the owning changefeed; also used as the topic name.
	id model.ChangeFeedID
	// protocol is the encoding protocol (maxwell for mongodb).
	protocol config.Protocol
	// alive guards the members below. WriteEvents holds the read lock while
	// sending to the worker; the worker's exit path takes the write lock to
	// set isDead and close the worker, so no send can race with the close.
	alive struct {
		sync.RWMutex
		transformer transformer.Transformer
		worker      *worker
		isDead      bool
	}
	//adminClient kafka.ClusterAdminClient
	ctx context.Context
	// cancel cancels ctx with a cause; invoked when a transform fails.
	cancel context.CancelCauseFunc
	// wg tracks the background worker goroutine started in newDMLSink.
	wg sync.WaitGroup
	// dead is closed once the background worker has stopped.
	dead chan struct{}
	// scheme is the scheme taken from the sink URI.
	scheme string
}

// NewMongodbDMLSink creates a mongodb DML sink that encodes row changed
// events with the maxwell protocol and delivers them via a background worker.
// On any setup failure the error is forwarded to errCh (non-blocking) and nil
// is returned, so callers must check for a nil sink.
func NewMongodbDMLSink(ctx context.Context, changefeedID model.ChangeFeedID, sinkURI *url.URL, replicaConfig *config.ReplicaConfig, errCh chan error) (_ *dmlSink) {
	log.Info("Starting mongodb DML sink ...",
		zap.String("namespace", changefeedID.Namespace),
		zap.String("changefeed", changefeedID.ID))

	// Forward a setup error to the caller without blocking. Previously these
	// errors were silently dropped and the caller only ever saw a nil sink.
	reportErr := func(err error) {
		select {
		case errCh <- err:
		default:
			log.Warn("mongodb dml sink setup error, ignored",
				zap.String("namespace", changefeedID.Namespace),
				zap.String("changefeed", changefeedID.ID),
				zap.Error(err))
		}
	}

	// The mongodb sink always encodes events with the maxwell protocol.
	protocol, err := util.GetProtocol("maxwell")
	if err != nil {
		reportErr(errors.Trace(err))
		return nil
	}
	if !util.IsMongodbSupportedProtocols(protocol) {
		reportErr(errors.Errorf("protocol %v is not supported by the mongodb sink", protocol))
		return nil
	}

	scheme := sink.GetScheme(sinkURI)
	trans, err := columnselector.New(replicaConfig)
	if err != nil {
		reportErr(errors.Trace(err))
		return nil
	}

	encoderConfig, err := util.GetEncoderConfig(changefeedID, sinkURI, protocol, replicaConfig,
		config.DefaultMaxMessageBytes)
	if err != nil {
		reportErr(errors.Trace(err))
		return nil
	}

	encoderBuilder, err := builder.NewRowEventEncoderBuilder(ctx, encoderConfig)
	if err != nil {
		reportErr(errors.Trace(err))
		return nil
	}

	encoderGroup := codec.NewEncoderGroup(replicaConfig.Sink, encoderBuilder, changefeedID)

	return newDMLSink(ctx, changefeedID, trans, encoderGroup, protocol, scheme, errCh)
}

// newDMLSink constructs the sink and starts the background worker goroutine.
// The worker consumes row events pushed by WriteEvents through its message
// channel; when the worker exits, the sink is marked dead, s.dead is closed,
// and any non-cancel error is forwarded to errCh (dropped if errCh is full).
func newDMLSink(ctx context.Context, changefeedID model.ChangeFeedID, transformer transformer.Transformer, encoderGroup codec.EncoderGroup, protocol config.Protocol, scheme string, errCh chan error) *dmlSink {
	ctx, cancel := context.WithCancelCause(ctx)
	statistics := metrics.NewStatistics(ctx, changefeedID, sink.RowSink)
	worker := newWorker(changefeedID, protocol, encoderGroup, statistics)

	s := &dmlSink{
		id:       changefeedID,
		protocol: protocol,
		//adminClient: adminClient,
		ctx:    ctx,
		cancel: cancel,
		dead:   make(chan struct{}),
		scheme: scheme,
	}

	s.alive.transformer = transformer
	s.alive.worker = worker

	// Start the goroutine in which the worker sends messages.
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		err := s.alive.worker.run(ctx)

		// Mark the sink dead and close the worker under the write lock, so
		// WriteEvents (which holds the read lock while sending) can never
		// push to a closed worker.
		s.alive.Lock()
		s.alive.isDead = true
		s.alive.worker.close()
		s.alive.Unlock()
		close(s.dead)

		if err != nil {
			// A bare context.Canceled is replaced with the recorded cancel
			// cause (set via s.cancel), which carries the real reason.
			if errors.Cause(err) == context.Canceled {
				err = context.Cause(ctx)
			}
			// Non-blocking send: if nobody is draining errCh, only log.
			select {
			case errCh <- err:
				log.Warn("mongodb dml sink meet error",
					zap.String("namespace", s.id.Namespace),
					zap.String("changefeed", s.id.ID),
					zap.Error(err))
			default:
				log.Info("mongodb dml sink meet error, ignored",
					zap.String("namespace", s.id.Namespace),
					zap.String("changefeed", s.id.ID),
					zap.Error(err))
			}
		}
	}()

	return s
}

// WriteEvents splits each transaction into rows and dispatches them to the
// background worker. The txn-level callback is invoked only after every row
// belonging to that txn has been acknowledged.
func (s *dmlSink) WriteEvents(txns ...*dmlsink.CallbackableEvent[*model.SingleTableTxn]) error {
	s.alive.RLock()
	defer s.alive.RUnlock()
	if s.alive.isDead {
		return errors.Trace(errors.New("dead dmlSink"))
	}

	// Because we split txn to rows when sending to the MQ.
	// So we need to convert the txn level callback to row level callback:
	// the last row's callback triggers the txn's callback.
	makeRowCallback := func(onTxnDone func(), rowCount uint64) func() {
		var acked atomic.Uint64
		return func() {
			if acked.Inc() == rowCount {
				onTxnDone()
			}
		}
	}

	for _, txn := range txns {
		if txn.GetTableSinkState() != state.TableSinkSinking {
			// The table where the event comes from is stopping, so it's safe
			// to drop the event directly.
			txn.Callback()
			continue
		}
		callback := makeRowCallback(txn.Callback, uint64(len(txn.Event.Rows)))
		for _, row := range txn.Event.Rows {
			name := row.TableInfo.TableName
			key := model.TopicPartitionKey{
				Topic:          s.id.ID, //s.alive.eventRouter.GetTopicForRowChange(row)
				Partition:      1,
				PartitionKey:   name.Schema + name.Table,
				TotalPartition: int32(1),
			}

			if err := s.alive.transformer.Apply(row); err != nil {
				s.cancel(err)
				return errors.Trace(err)
			}

			s.alive.worker.msgChan.In() <- msgEvent{
				key: key,
				rowEvent: &dmlsink.RowChangeCallbackableEvent{
					Event:     row,
					Callback:  callback,
					SinkState: txn.SinkState,
				},
			}
		}
	}
	return nil
}

// Scheme returns the scheme of the sink.
func (s *dmlSink) Scheme() string {
	// Return the scheme captured from the sink URI at construction time
	// (stored but previously unused) instead of the hard-coded constant, so
	// variant schemes are reported faithfully.
	return s.scheme
}

// Close stops the sink: it cancels the sink's context and waits for the
// background worker goroutine to exit. The previous no-op implementation
// leaked the worker goroutine started in newDMLSink.
func (s *dmlSink) Close() {
	if s.cancel != nil {
		s.cancel(nil)
	}
	s.wg.Wait()
}

// Dead returns a channel that is closed once the background worker exits.
func (s *dmlSink) Dead() <-chan struct{} {
	// Bug fix: the original returned a freshly made channel that was never
	// closed, so callers could never observe sink death. Return the real
	// signal channel, which the worker goroutine closes on exit.
	return s.dead
}
