package kafka2

import (
	"gitee.com/chejiangyi/bsfgo/core/base2"
	"strings"
)
import "github.com/IBM/sarama"

var KafkaUtil = &KafkaUtilTool{}

type KafkaUtilTool struct{}

// CreateProducer builds a KafkaProducer using settings read from the
// global bsf configuration, then delegates to CreateProducer2.
func (m *KafkaUtilTool) CreateProducer() *KafkaProducer {
	bsf := base2.BsfConfigInstance
	cfg := KafkaConfig{
		Brokers: strings.Split(bsf.GetKafkaBrokers(), ","),
		Version: bsf.GetKafkaVersion(), // e.g. "2.5.0"
	}
	// Producer-specific settings; the parse helpers map raw config
	// strings onto sarama values, falling back to library defaults.
	cfg.ProducerRequiredAck = cfg.parseRequiredAcks(bsf.GetKafkaProducerRequiredAck())        // default WaitForLocal
	cfg.ProducerMaxMessageBytes = bsf.GetKafkaProducerMaxMessageBytes()                       // default 1000000 (1MB)
	cfg.ProducerCompression = cfg.parseProducerCompression(bsf.GetKafkaProducerCompression()) // default CompressionNone
	cfg.ProducerReturnSuccesses = bsf.GetKafkaProducerReturnSuccesses()
	return m.CreateProducer2(cfg)
}

// CreateProducer2 creates a synchronous sarama producer from an explicit
// KafkaConfig. It panics with a BsfError when the producer cannot be
// constructed (e.g. brokers unreachable or config invalid).
func (*KafkaUtilTool) CreateProducer2(config KafkaConfig) *KafkaProducer {
	saramaCfg := sarama.NewConfig()
	config.CheckConfig(saramaCfg)
	// Ack mode, success notifications and compression are taken from the
	// supplied config; the partitioner is fixed to random distribution.
	saramaCfg.Producer.RequiredAcks = config.ProducerRequiredAck
	saramaCfg.Producer.Partitioner = sarama.NewRandomPartitioner
	saramaCfg.Producer.Return.Successes = config.ProducerReturnSuccesses
	saramaCfg.Producer.Compression = config.ProducerCompression

	p, err := sarama.NewSyncProducer(config.Brokers, saramaCfg)
	if err != nil {
		panic(base2.NewBsfError2("kafka producer创建失败: %v", err))
	}
	return &KafkaProducer{SyncProducer: p}
}

// CreateConsumerGroup builds a KafkaConsumer using settings read from the
// global bsf configuration, then delegates to CreateConsumerGroup2.
func (m *KafkaUtilTool) CreateConsumerGroup() *KafkaConsumer {
	bsf := base2.BsfConfigInstance
	cfg := KafkaConfig{
		Brokers: strings.Split(bsf.GetKafkaBrokers(), ","),
		Version: bsf.GetKafkaVersion(), // e.g. "2.5.0"
	}
	// Consumer-group-specific settings.
	cfg.GroupID = bsf.GetKafkaGroupID()                                                           // consumer group ID
	cfg.ConsumerInitialOffset = m.parseConsumerInitialOffset(bsf.GetKafkaConsumerInitialOffset()) // default OffsetNewest
	//ConsumerAssignor:                  utils2.BsfConfigInstance.GetKafkaConsumerAssignor(),
	cfg.ConsumerReturnErrors = bsf.GetKafkaConsumerReturnErrors()
	cfg.ConsumerOffsetsAutoCommitEnable = bsf.GetKafkaConsumerOffsetsAutoCommitEnable()
	cfg.ConsumerOffsetsAutoCommitInterval = bsf.GetKafkaConsumerOffsetsAutoCommitIntervalSeconds()
	cfg.ConsumerMaxProcessingTime = bsf.GetKafkaConsumerMaxProcessingTimeSeconds()
	return m.CreateConsumerGroup2(cfg)
}

// CreateConsumerGroup2 creates a sarama consumer group from an explicit
// KafkaConfig. It panics with a BsfError when the consumer group cannot
// be constructed (e.g. brokers unreachable or config invalid).
func (*KafkaUtilTool) CreateConsumerGroup2(config KafkaConfig) *KafkaConsumer {
	saramaCfg := sarama.NewConfig()
	config.CheckConfig(saramaCfg)
	// Offset, error-reporting and auto-commit behaviour all come from
	// the supplied config.
	saramaCfg.Consumer.Offsets.Initial = config.ConsumerInitialOffset
	saramaCfg.Consumer.Return.Errors = config.ConsumerReturnErrors
	saramaCfg.Consumer.Offsets.AutoCommit.Enable = config.ConsumerOffsetsAutoCommitEnable
	saramaCfg.Consumer.Offsets.AutoCommit.Interval = config.ConsumerOffsetsAutoCommitInterval
	saramaCfg.Consumer.MaxProcessingTime = config.ConsumerMaxProcessingTime
	cg, err := sarama.NewConsumerGroup(config.Brokers, config.GroupID, saramaCfg)
	if err != nil {
		panic(base2.NewBsfError2("kafka consumer创建失败: %v", err))
	}
	return &KafkaConsumer{ConsumerGroup: cg}
}
