package saramax

import (
	"context"
	"encoding/json"
	"fmt"
	"github.com/IBM/sarama"
	"github.com/prometheus/client_golang/prometheus"
	"time"
	"webook/pkg/logger"
)

// PrometheusBatchHandler is a sarama.ConsumerGroupHandler that consumes
// Kafka messages in small batches, JSON-decodes each message body into T,
// and exports Prometheus metrics for consumer backlog and per-message
// processing time.
type PrometheusBatchHandler[T any] struct {
	// fn handles one batch: the raw messages alongside their decoded bodies.
	fn             func(msgs []*sarama.ConsumerMessage, ts []T) error
	l              logger.LoggerV1
	// backLogVector records consumer lag, labeled by topic and partition.
	backLogVector  *prometheus.SummaryVec
	// costTimeVector records per-message processing time, labeled by topic.
	costTimeVector *prometheus.SummaryVec
	// client is used to query the broker's newest offset when computing backlog.
	client         sarama.Client
	// groupId identifies the consumer group whose committed offsets are read.
	groupId        string
}

// NewPrometheusBatchHandler builds a batch consumer-group handler that
// exports two summary metrics: the per-partition consumer backlog and the
// per-message processing duration. Both collectors are registered with the
// default Prometheus registry, so constructing a second handler with the
// same metric names will panic via MustRegister.
func NewPrometheusBatchHandler[T any](l logger.LoggerV1, fn func(msgs []*sarama.ConsumerMessage, ts []T) error,
	client sarama.Client, groupId string) *PrometheusBatchHandler[T] {
	h := &PrometheusBatchHandler[T]{
		fn:      fn,
		l:       l,
		client:  client,
		groupId: groupId,
		backLogVector: prometheus.NewSummaryVec(prometheus.SummaryOpts{
			Namespace: "cutele",
			Subsystem: "webook",
			Name:      "kafka_consumer_backlog",
			Help:      "Kafka消费积压信息",
		}, []string{"topic", "partition"}),
		costTimeVector: prometheus.NewSummaryVec(prometheus.SummaryOpts{
			Namespace: "cutele",
			Subsystem: "webook",
			Name:      "kafka_consumer_duration",
			Help:      "Kafka消费者耗时信息",
		}, []string{"topic"}),
	}
	prometheus.MustRegister(h.backLogVector, h.costTimeVector)
	return h
}

// Setup implements sarama.ConsumerGroupHandler. It runs before the session's
// ConsumeClaim calls start; no per-session initialization is required here.
func (b *PrometheusBatchHandler[T]) Setup(session sarama.ConsumerGroupSession) error {
	return nil
}

// Cleanup implements sarama.ConsumerGroupHandler. It runs after all
// ConsumeClaim goroutines of the session have exited; nothing to release.
func (b *PrometheusBatchHandler[T]) Cleanup(session sarama.ConsumerGroupSession) error {
	return nil
}

// ConsumeClaim consumes messages from the claimed partition in batches of up
// to batchSize (or whatever arrives within one second), JSON-decodes each
// message body into T, hands the batch to b.fn, and marks the messages as
// consumed. A background goroutine reports the consumer backlog once per
// second and stops when the session ends.
func (b *PrometheusBatchHandler[T]) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	msgs := claim.Messages()
	const batchSize = 10
	// job8.md: backlog metric. Bound the goroutine to the session context so
	// it terminates on rebalance/shutdown instead of leaking.
	go b.reportBacklog(session.Context(), claim.Topic(), claim.Partition())
	for {
		batch := make([]*sarama.ConsumerMessage, 0, batchSize)
		ts := make([]T, 0, batchSize)
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		done := false
		for i := 0; i < batchSize && !done; i++ {
			select {
			case <-ctx.Done():
				// Batch window expired: process whatever we collected.
				done = true
			case msg, ok := <-msgs:
				if !ok {
					// Channel closed — the claim is over.
					cancel()
					return nil
				}
				var t T
				if err := json.Unmarshal(msg.Value, &t); err != nil {
					b.l.Error("反序列消息体失败",
						logger.String("topic", msg.Topic),
						logger.Int32("partition", msg.Partition),
						logger.Int64("offset", msg.Offset),
						logger.Error(err))
					// Keep the undecodable message in batch so it is still
					// marked below and not redelivered forever.
					batch = append(batch, msg)
					continue
				}
				// BUG FIX: the original appended msg both before and after
				// the Unmarshal, duplicating every successfully decoded
				// message in the batch.
				batch = append(batch, msg)
				ts = append(ts, t)
			}
		}
		cancel()
		if len(batch) == 0 {
			// Nothing arrived within the window; skip fn and the metric.
			continue
		}
		startTime := time.Now()
		err := b.fn(batch, ts)
		// job8.md: average per-message cost. Divide by the actual batch
		// length, not the batchSize constant, so partial batches are not
		// under-reported.
		b.costTimeVector.WithLabelValues(claim.Topic()).
			Observe(float64(time.Since(startTime).Milliseconds()) / float64(len(batch)))
		if err != nil {
			b.l.Error("处理消息失败",
				logger.Error(err))
		}
		for _, msg := range batch {
			session.MarkMessage(msg, "")
		}
	}
}

// reportBacklog publishes the partition backlog (newest broker offset minus
// the group's next-to-consume offset) once per second until ctx is done.
func (b *PrometheusBatchHandler[T]) reportBacklog(ctx context.Context, topic string, partition int32) {
	// Create the offset managers once: the original rebuilt an OffsetManager
	// every second and never closed it, leaking broker connections.
	om, err := sarama.NewOffsetManagerFromClient(b.groupId, b.client)
	if err != nil {
		b.l.Error("获取OffsetManager错误", logger.String("groupId", b.groupId), logger.String("topic", topic),
			logger.Int32("partition", partition), logger.Error(err))
		return
	}
	defer om.Close()
	pom, err := om.ManagePartition(topic, partition)
	if err != nil {
		b.l.Error("获取PartitionOffsetManager错误", logger.String("groupId", b.groupId), logger.String("topic", topic),
			logger.Int32("partition", partition), logger.Error(err))
		return
	}
	defer pom.Close()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
		// Offset that the next produced message will receive. The original
		// discarded this call's error by immediately reassigning err.
		newest, err := b.client.GetOffset(topic, partition, sarama.OffsetNewest)
		if err != nil {
			b.l.Error("获取最新offset错误", logger.String("groupId", b.groupId), logger.String("topic", topic),
				logger.Int32("partition", partition), logger.Error(err))
			continue
		}
		// NextOffset's second return value is the committed metadata string,
		// NOT an error (the original mistakenly treated it as one). A
		// negative offset (OffsetNewest/OffsetOldest) means the group has
		// not committed anything for this partition yet.
		next, _ := pom.NextOffset()
		backlog := newest
		if next >= 0 {
			backlog = newest - next
		}
		b.backLogVector.WithLabelValues(topic, fmt.Sprintf("%v", partition)).Observe(float64(backlog))
	}
}
