package kqutil

import (
	"context"
	"errors"
	"time"

	"github.com/segmentio/kafka-go"
	"github.com/zeromicro/go-zero/core/logx"
	"github.com/zeromicro/go-zero/core/queue"
	"github.com/zeromicro/go-zero/core/syncx"
	"github.com/zeromicro/go-zero/core/threading"

	"paper-pro/pkg/kqutil/kqbuffer"
)

type (
	// WorkConsumer is the business-side handler: it receives one batch of
	// fetched Kafka messages and processes them as a unit.
	WorkConsumer interface {
		Consume(ctx context.Context, msgs []*kafka.Message) error
	}

	// MwConsumerConf configures a MultiWorkerConsumer.
	MwConsumerConf struct {
		// KafkaConf holds the kafka-go reader settings.
		KafkaConf struct {
			Brokers []string
			GroupID string
			Topic   string
		}

		OffsetTrackerCapacity int // capacity passed to NewOffsetTracker
		CollectInterval       int // unit: seconds
		ComposeBatchSize      int // max messages per batch handed to a worker
		WorkersNum            int // number of concurrent worker goroutines
		CommitInterval        int // unit: seconds
	}

	// MultiWorkerConsumer consumes a Kafka topic through a pipeline of
	// goroutines (fetcher -> collector -> composer -> workers -> commiter),
	// tracking offsets so commits only cover fully-processed batches.
	MultiWorkerConsumer struct {
		c   MwConsumerConf
		ctx context.Context
		logx.Logger
		cancelFunc func()

		consumer WorkConsumer

		// on is the global running flag; every pipeline loop uses it as its
		// run condition and Stop flips it to false.
		on *syncx.AtomicBool

		kafkaReader    *kafka.Reader
		offsetTracker  *OffsetTracker
		colletChan     chan *kafka.Message      // fetcher -> collector
		composeChan    chan *kqbuffer.KqBatchMsg // composer -> workers
		commitChan     chan struct{}             // workers -> commiter (signal)
		kqRoutines     *threading.RoutineGroup   // pipeline goroutines
		workerRoutines *threading.RoutineGroup   // worker pool goroutines
	}
)

// NewMultiWorkerConsumer builds a MultiWorkerConsumer that fetches messages
// from the configured Kafka topic and fans batches out to WorkersNum
// concurrent workers backed by consumer. The returned value implements
// queue.MessageQueue (Start/Stop).
func NewMultiWorkerConsumer(c MwConsumerConf, ctx context.Context, consumer WorkConsumer) queue.MessageQueue {
	// Idiomatic lowercase local name (the original `KafkaConf` read like an
	// exported identifier and shadowed the config field's name).
	readerConf := kafka.ReaderConfig{
		Brokers: c.KafkaConf.Brokers,
		GroupID: c.KafkaConf.GroupID,
		Topic:   c.KafkaConf.Topic,
	}
	return &MultiWorkerConsumer{
		c:      c,
		ctx:    ctx,
		Logger: logx.WithContext(ctx),

		consumer: consumer,

		on: syncx.ForAtomicBool(true),

		kafkaReader:    kafka.NewReader(readerConf),
		offsetTracker:  NewOffsetTracker(c.OffsetTrackerCapacity, 5), // retry at most 5 times when a commit is blocked/pending
		colletChan:     make(chan *kafka.Message, 16),
		composeChan:    make(chan *kqbuffer.KqBatchMsg, 4),
		commitChan:     make(chan struct{}, 1),
		kqRoutines:     threading.NewRoutineGroup(),
		workerRoutines: threading.NewRoutineGroup(),
	}
}

// Start launches the pipeline stages (fetcher, collector, composer,
// commiter) plus the worker pool, then blocks until every goroutine has
// exited. The four stage launches were previously four copy-pasted
// closures; they are driven from one table here.
func (mc *MultiWorkerConsumer) Start() {
	ctx, cancel := context.WithCancel(mc.ctx)
	mc.cancelFunc = cancel
	stages := []struct {
		name string
		run  func() error
	}{
		{"fetcher", func() error { return mc.fetcher(ctx) }},
		{"collector", func() error { return mc.collector(ctx) }},
		{"composer", func() error { return mc.composer() }},
		{"commiter", func() error { return mc.commiter(ctx) }},
	}
	for _, stage := range stages {
		stage := stage // per-iteration copy (safe under pre-1.22 loop semantics)
		mc.kqRoutines.Run(func() {
			if err := stage.run(); err != nil {
				mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】%s end error: %v", stage.name, err)
			}
		})
	}
	mc.startWorkersAndWait(ctx)
	mc.kqRoutines.Wait()
}

// startWorkersAndWait runs WorkersNum worker goroutines and blocks until all
// of them have returned, then performs the shutdown plumbing for the
// downstream stages.
func (mc *MultiWorkerConsumer) startWorkersAndWait(ctx context.Context) {
	defer func() {
		mc.workerRoutines.Wait()
		close(mc.commitChan) // closing commitChan tells commiter to do a final commit and exit
		for len(mc.composeChan) > 0 {
			// Drain composeChan (the original comment said commitChan by
			// mistake) so a composer blocked on a send can complete it and
			// shut down normally.
			<-mc.composeChan
		}
	}()
	for i := 0; i < mc.c.WorkersNum; i++ {
		mc.workerRoutines.Run(func() {
			err := mc.worker(ctx)
			if err != nil {
				mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】worker end error: %v", err)
			}
		})
	}
}

// Stop shuts the consumer down: it flips the running flag off, cancels the
// pipeline context (if Start has run), and wakes any goroutine blocked
// inside the offset tracker.
func (mc *MultiWorkerConsumer) Stop() {
	mc.dispose()
	if cancel := mc.cancelFunc; cancel != nil {
		cancel()
	}
	mc.offsetTracker.SignalStop()
}

// dispose marks the consumer as stopped; every pipeline loop checks this
// flag (mc.on.True()) as its run condition.
func (mc *MultiWorkerConsumer) dispose() {
	mc.on.Set(false)
}

// fetcher pulls messages from Kafka one at a time and hands them to the
// collector via colletChan. It closes colletChan on exit so the collector
// can observe end-of-stream.
func (mc *MultiWorkerConsumer) fetcher(ctx context.Context) (err error) {
	defer close(mc.colletChan)
	for mc.on.True() {
		msg, err := mc.kafkaReader.FetchMessage(ctx)
		if err != nil {
			// Once ctx is cancelled FetchMessage fails instantly on every
			// call; return instead of busy-looping on the same error until
			// the `on` flag flips.
			if ctx.Err() != nil {
				return nil
			}
			mc.Errorf("【kqutil.MultiWorkerConsumer】fetcher: kafkaReader FetchMessage error: %v", err)
			continue
		}
		// Guard the send with ctx so a shutdown after the collector has
		// already exited cannot leave this goroutine blocked forever (which
		// would also prevent colletChan from ever closing).
		select {
		case mc.colletChan <- &msg:
		case <-ctx.Done():
			return nil
		}
	}
	return
}

// collector receives fetched messages and pushes them into the offset
// tracker; every CollectInterval seconds it flips the tracker's write side
// to readable so the composer can batch partially-filled buffers. It
// triggers Stop() on exit so the rest of the pipeline winds down with it.
func (mc *MultiWorkerConsumer) collector(ctx context.Context) (err error) {
	defer mc.Stop()
	duration := time.Duration(mc.c.CollectInterval) * time.Second
	ticker := time.NewTicker(duration)
	defer ticker.Stop()
	for mc.on.True() {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			// Interval elapsed: expose whatever has been written so far,
			// even if the buffer is not full.
			mc.offsetTracker.TrySetWriteToRead()
		case msg, ok := <-mc.colletChan:
			if !ok {
				return // fetcher closed the channel
			}
			isSetRead, e := mc.offsetTracker.PushMsg(msg)
			if e == ErrSignalStop {
				return // shutdown was signalled
			}
			if e != nil {
				err = e
				mc.Errorf("【kqutil.MultiWorkerConsumer】collector: PushMsg error: %v", err)
				continue
			}
			if isSetRead {
				// PushMsg itself flipped a full buffer to readable, so the
				// interval countdown starts over.
				ticker.Reset(duration)
			}
		}
	}
	return
}

// composer pulls batches of up to ComposeBatchSize messages out of the
// offset tracker and feeds them to the workers through composeChan. On exit
// it triggers Stop() and closes composeChan so workers observe
// end-of-stream.
func (mc *MultiWorkerConsumer) composer() (err error) {
	defer func() {
		mc.Stop()
		// Data still sitting in colletChan or the offsetTracker is
		// deliberately dropped at this point.
		close(mc.composeChan)
	}()
	for mc.on.True() {
		if batch, e := mc.offsetTracker.BatchMsg(mc.c.ComposeBatchSize); e == nil {
			mc.composeChan <- batch
		} else if e == ErrSignalStop {
			return err // shutdown was signalled
		} else {
			err = e
			mc.Errorf("【kqutil.MultiWorkerConsumer】composer: BatchMsg error: %v", err)
		}
	}
	return
}

// worker consumes batches from composeChan: it runs the business Consume
// callback, marks the batch as ready in the offset tracker, and — when no
// batch is left waiting — nudges the commiter.
func (mc *MultiWorkerConsumer) worker(ctx context.Context) (err error) {
	defer mc.Logger.Infof("【kqutil.MultiWorkerConsumer】worker: exit") // no recover, just a trace
	for mc.on.True() {
		select {
		case <-ctx.Done():
			return nil
		case batchMsg, ok := <-mc.composeChan:
			if !ok {
				return // composer closed the channel
			}
			if err = mc.consumer.Consume(ctx, batchMsg.Msgs); err != nil {
				// Log only; a failed batch does not stop the worker.
				mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】worker: consumer Consume error: %v", err)
			}
			var countWaitReady int
			if countWaitReady, err = mc.offsetTracker.ReadyMsg(batchMsg); err != nil {
				// Serious problem — crashing the process here is an option.
				mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】worker: ReadyMsg error: %v", err)
			}
			if countWaitReady == 0 {
				mc.notifyCommit()
			}
		}
	}
	return
}

// notifyCommit signals the commiter that a commit is worth attempting.
// Non-blocking: if a signal is already queued, this one is dropped.
func (mc *MultiWorkerConsumer) notifyCommit() {
	select {
	case mc.commitChan <- struct{}{}:
	default:
	}
}

// alreadyCommit discards a pending commit signal, if any, after a
// ticker-driven commit has already covered it. Non-blocking.
func (mc *MultiWorkerConsumer) alreadyCommit() {
	select {
	case <-mc.commitChan:
	default:
	}
}

// commiter commits tracked offsets to Kafka: on every CommitInterval tick,
// on every notifyCommit signal, and once more when commitChan is closed
// during shutdown. It triggers Stop() on exit so the rest of the pipeline
// winds down with it.
func (mc *MultiWorkerConsumer) commiter(ctx context.Context) (err error) {
	defer mc.Stop()
	duration := time.Duration(mc.c.CommitInterval) * time.Second
	ticker := time.NewTicker(duration)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			err = mc.commiteKqOffset(ctx)
			if err != nil {
				mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】commiter: commiteKqOffset: error: %v", err)
				return // ErrCommitPending: repeated blocked commits on a non-empty buffer likely mean some batch failed
			}
			// A signal that raced with this tick is now redundant; drop it.
			mc.alreadyCommit()
		case _, ok := <-mc.commitChan:
			if !ok { // channel closed: shutdown, do one final best-effort commit
				// Fix: this error was previously discarded silently.
				if cerr := mc.commiteKqOffset(ctx); cerr != nil {
					mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】commiter: final commiteKqOffset: error: %v", cerr)
				}
				return
			}
			err = mc.commiteKqOffset(ctx)
			if err != nil {
				mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】commiter: commiteKqOffset: error: %v", err)
				return // ErrCommitPending: repeated blocked commits on a non-empty buffer likely mean some batch failed
			}
			ticker.Reset(duration)
		}
	}
}

// commiteKqOffset collects the highest ready message per partition from the
// offset tracker and commits each of them to Kafka.
//
// An empty or still-pending buffer is not an error — there is simply nothing
// to commit yet. A per-partition commit failure is logged and tolerated: the
// offset is treated as consumed, and the next successful commit supersedes
// it.
func (mc *MultiWorkerConsumer) commiteKqOffset(ctx context.Context) error {
	result, err := mc.offsetTracker.CollectOffset()
	if err != nil {
		// errors.Is instead of == so wrapped sentinels are matched too.
		if errors.Is(err, kqbuffer.ErrPopEmpty) || errors.Is(err, kqbuffer.ErrPopPending) {
			return nil
		}
		return err
	}

	for partition, lastMsg := range result {
		if err := mc.kafkaReader.CommitMessages(ctx, *lastMsg); err != nil {
			// Policy: treat this commit as done anyway; the next commit will
			// cover the correct offset.
			mc.Logger.Errorf("【kqutil.MultiWorkerConsumer】commiter: commiteKqOffset partition %d, offset %d: error: %v",
				partition, lastMsg.Offset, err)
		}
	}
	return nil
}
