package kafka

import (
	"context"
	"fmt"
	log "gitee.com/vrv_media/go-micro-framework/pkg/logger"
	"github.com/segmentio/kafka-go"
	"time"
)

// MessageHandler is a callback invoked with the string value of each consumed
// Kafka message.
type MessageHandler func(message string)

// Consumer wraps a kafka-go Reader and fans consumed messages out to a set of
// registered MessageHandler callbacks.
type Consumer struct {
	logger          log.LoggerHelper // named logger ("kafka-consumer")
	Addr            string           // broker address the reader connects to
	Topic           string           // topic being consumed (also used as the group id)
	consumer        *kafka.Reader    // underlying kafka-go consumer
	messageHandlers []MessageHandler // callbacks notified for every message; appended via AddMessageHandler
}

// NewKafkaConsumerClient creates a Consumer for a single broker address and
// topic and immediately starts consuming in a background goroutine.
//
// The consumer group id is set to the topic name, so all processes built with
// this constructor for the same topic share one group: partitions (and thus
// messages) are load-balanced across them, not broadcast.
//
// NOTE(review): the listener goroutine is started before the caller has any
// chance to register handlers, so messages read in that window are dispatched
// to an empty handler list and effectively dropped — register handlers
// immediately after construction, or confirm this is acceptable.
func NewKafkaConsumerClient(addr, topic string) *Consumer {
	consumerClient := &Consumer{
		logger:          log.NewNameLogWithDefaultLog("kafka-consumer"),
		Addr:            addr,
		Topic:           topic,
		messageHandlers: make([]MessageHandler, 0),
	}
	// Fields omitted from ReaderConfig take kafka-go's documented defaults
	// (their zero values) — listing them all explicitly as 0/nil/false only
	// obscured the three settings that actually matter here.
	consumerClient.consumer = kafka.NewReader(kafka.ReaderConfig{
		Brokers:        []string{addr},   // broker address list
		GroupID:        topic,            // consumer group id; members of one group split partitions among themselves
		Topic:          topic,            // topic to consume
		CommitInterval: time.Second,      // flush committed offsets to the broker once per second
		StartOffset:    kafka.LastOffset, // only for a brand-new group: begin at the newest offset (skip history)
	})
	// Start the background listener right away.
	consumerClient.ListenMessage(context.Background())
	return consumerClient
}

// AddMessageHandler registers a callback that will be invoked for every
// consumed message. Nil handlers are silently ignored.
//
// NOTE(review): the append is unsynchronized while the listener goroutine may
// concurrently iterate c.messageHandlers — callers appear expected to register
// handlers right after construction, before traffic flows; confirm, or guard
// the slice with a mutex.
func (c *Consumer) AddMessageHandler(handler MessageHandler) {
	if handler != nil {
		c.messageHandlers = append(c.messageHandlers, handler)
	}
}

// ListenMessage starts a goroutine that reads messages from Kafka until ctx is
// cancelled, dispatching each message body to the registered handlers.
func (c *Consumer) ListenMessage(ctx context.Context) {
	go func() {
		for {
			msg, err := c.consumer.ReadMessage(ctx)
			if err != nil {
				// Once ctx is cancelled, ReadMessage fails on every call; the
				// original loop would spin forever re-logging the same error.
				// Exit instead so the goroutine's lifetime is bounded by ctx.
				if ctx.Err() != nil {
					c.logger.Error(fmt.Sprintf("kafka listener stopped: %v", ctx.Err()))
					return
				}
				c.logger.Error(fmt.Sprintf("读kafka失败，err:%v", err))
				continue // a single failed read should not stop consumption
			}
			message := string(msg.Value)
			// NOTE(review): every consumed message is logged at Error level; the
			// LoggerHelper interface is not visible here — switch to an Info/Debug
			// method if the interface provides one.
			c.logger.Error(fmt.Sprintf("topic=%s,partition=%d,offset=%d,key=%s,message=%v", msg.Topic, msg.Partition, msg.Offset, msg.Key, message))
			// Fan out asynchronously so slow handlers don't block the read loop.
			go c.SendMessageToHandlers(message)
		}
	}()
}

// SendMessageToHandlers delivers message to every registered handler, in
// registration order. With no handlers registered it does nothing (ranging
// over an empty slice is a no-op, so no explicit guard is needed).
func (c *Consumer) SendMessageToHandlers(message string) {
	for _, h := range c.messageHandlers {
		c.handleMessage(h, message)
	}
}

// handleMessage invokes a single handler, recovering from any panic so that
// one misbehaving callback cannot take down the dispatch path. Nil handlers
// and empty messages are skipped.
func (c *Consumer) handleMessage(handler MessageHandler, message string) {
	if handler == nil || message == "" {
		return
	}
	defer func() {
		if r := recover(); r != nil {
			c.logger.ErrorF("handleMessage panic,err:%v", r)
		}
	}()
	handler(message)
}
