package kconsume

import (
	"context"
	"github.com/Shopify/sarama"
	"github.com/bsm/sarama-cluster"
	"log"
	"strings"
	"time"
)

// KafkaConsume wraps a sarama-cluster consumer group, holding the
// connection parameters plus the hooks used to stop and observe it.
type KafkaConsume struct {
	GroupId 	string // consumer group id; consumers sharing it split partitions between them
	Topic 		string // comma-separated topic list (split in Start)
	Brokers 	string // comma-separated broker addresses (split in Start / GetPartitionOffset)

	consume 	*cluster.Consumer // created by Start; nil until Start succeeds
	cancel 		context.CancelFunc // invoked by Close to stop the goroutine started in errLog
	logFunc 	func(v ...interface{}) // optional log sink used by errLog; may be nil
}

// Close stops the consumer by cancelling the context that drives the
// background goroutine started via Start/errLog; that goroutine is the
// one that actually closes the underlying cluster.Consumer.
// Safe to call on a KafkaConsume constructed without a cancel func.
func (c *KafkaConsume) Close() {
	if c.cancel != nil {
		c.cancel()
	}
}

// NewKafkaConsume builds a KafkaConsume from the given connection
// parameters. cancel is called by Close to stop the background
// goroutine; logFunc, if non-nil, receives error and status messages.
// The returned value is not connected until Start is called.
func NewKafkaConsume(groupId string, topic string, brokers string, cancel context.CancelFunc, logFunc func(v ...interface{})) *KafkaConsume {
	kc := new(KafkaConsume)
	kc.GroupId = groupId
	kc.Topic = topic
	kc.Brokers = brokers
	kc.cancel = cancel
	kc.logFunc = logFunc
	return kc
}

// SetLogger installs logger as sarama's package-wide logger.
// NOTE: this mutates global state (sarama.Logger) and therefore affects
// every sarama client in the process, not just this consumer.
func (c *KafkaConsume) SetLogger(logger *log.Logger) {
	sarama.Logger = logger
}

// Start connects to the brokers, joins the consumer group and begins
// consuming the configured topics. On success it also launches the
// background goroutine (errLog) that drains the error/notification
// channels and closes the consumer when ctx is cancelled.
func (c *KafkaConsume) Start(ctx context.Context) error {
	cfg := cluster.NewConfig()
	cfg.Group.Return.Notifications = true

	// Offsets:
	// 1. sarama auto-commits offsets on an interval (default 1s);
	// 2. alternatively, manage them manually: MarkOffset (e.g. every
	//    100 messages) followed by CommitOffsets.
	cfg.Consumer.Offsets.CommitInterval = 1 * time.Second
	cfg.Consumer.Offsets.Initial = sarama.OffsetNewest

	brokerList := strings.Split(c.Brokers, ",")
	topicList := strings.Split(c.Topic, ",")
	consumer, err := cluster.NewConsumer(brokerList, c.GroupId, topicList, cfg)
	if err != nil {
		return err
	}
	c.consume = consumer

	c.errLog(ctx)
	return nil
}

// errLog launches a goroutine that services the consumer's Errors and
// Notifications channels — both must be drained because
// Group.Return.Notifications is enabled in Start. When ctx is cancelled
// the goroutine closes the consumer and exits, so the goroutine's
// lifetime is bounded by the context.
func (c *KafkaConsume) errLog(ctx context.Context) {
	go func(consumer *cluster.Consumer) {
		errCh := consumer.Errors() // renamed from "errors" to avoid shadowing the stdlib package
		notifyCh := consumer.Notifications()
		for {
			select {
			case err := <-errCh:
				if err != nil && c.logFunc != nil {
					c.logFunc("KafkaConsume: ", err.Error())
				}
			case <-notifyCh:
				// Rebalance notifications are drained but intentionally ignored.
			case <-ctx.Done():
				if c.logFunc != nil {
					c.logFunc("KafkaConsume: Done stop kafka consumer, close consumer resource.")
				}
				// Report, rather than silently drop, any error from Close.
				if err := consumer.Close(); err != nil && c.logFunc != nil {
					c.logFunc("KafkaConsume: close error: ", err.Error())
				}
				return
			}
		}
	}(c.consume)
}

// Msg returns the consumer's message channel. Valid only after a
// successful Start; calling it before Start dereferences the nil
// consume field and panics.
func (c *KafkaConsume) Msg() <-chan *sarama.ConsumerMessage {
	return c.consume.Messages()
}
// 手动管理Offset待实现；




// https://www.cnblogs.com/xiaodf/p/6093261.html
// https://www.cnblogs.com/gm-201705/p/7944362.html
// https://blog.csdn.net/jeffrey11223/article/details/80706515

// Usage notes:
// 1. To have multiple consumers consume the same topic, call cluster.NewConsumer
//    multiple times to create multiple consumers;
//	  - consumers belonging to the same group must share the same groupId;
//	  - creating a new group allows the topic's data to be consumed again independently.
// 2. Within one group, Kafka balances partition assignment across the consumers and
//    delivers each partition's messages only to its assigned consumer;
//    - each partition is consumed by at most one consumer at a time, so the number
//      of consumers should not exceed the number of partitions.

// 获取某个consumer group对应的某个分区的offset位置
// GetPartitionOffset reports the next offset this consumer group will
// consume for the given partition of c.Topic. It builds a short-lived
// client and offset manager, which are torn down (in LIFO defer order:
// partition manager, then offset manager, then client) before return.
// On any error the returned offset is 0 and must not be used.
//
// NOTE(review): Start treats c.Topic as a comma-separated list, but it
// is passed verbatim to ManagePartition here — verify callers only use
// this with a single-topic value.
func (c *KafkaConsume) GetPartitionOffset(partition int32) (int64, error) {
	config := sarama.NewConfig()
	config.Consumer.Offsets.CommitInterval = 1 * time.Second
	config.Version = sarama.V2_0_0_0 // pins the Kafka protocol version used by the client

	client, err := sarama.NewClient(strings.Split(c.Brokers, ","), config)
	if err != nil {
		return 0, err
	}
	defer client.Close()

	offsetManager, err := sarama.NewOffsetManagerFromClient(c.GroupId, client)
	if err != nil {
		return 0, err
	}
	defer offsetManager.Close()

	partitionOffsetManager, err := offsetManager.ManagePartition(c.Topic, partition)
	if err != nil {
		return 0, err
	}
	defer partitionOffsetManager.Close()

	// NextOffset's second result (offset metadata string) is deliberately discarded.
	nextOffset, _ := partitionOffsetManager.NextOffset()
	return nextOffset, nil
}

