package kafka

import (
	"Logger/logAgent/collect"
	"Logger/logTransfer/etcd"
	"context"
	"encoding/json"
	"fmt"
	"github.com/Shopify/sarama"
	"github.com/sirupsen/logrus"
	"go.etcd.io/etcd/api/v3/mvccpb"
	"time"
)

var (
	// consumerGroup is the shared sarama consumer-group client, set by InitConsumerGroup.
	consumerGroup sarama.ConsumerGroup
	// consumer is the shared low-level sarama consumer, set by InitConsumer.
	consumer      sarama.Consumer
)

// partitionInstance tracks the consumption of a single Kafka topic: the
// partitions to read and a cancellable context used to stop the consume loop.
type partitionInstance struct {
	partionList []int32 // partitions of the topic (as returned by consumer.Partitions)
	topic       string  // the Kafka topic this instance consumes

	ctx    context.Context    // done when this instance should stop consuming
	cancel context.CancelFunc // cancels ctx to shut down consume()
}

var (
	// consumerMgr tracks one partitionInstance per topic and reacts to etcd
	// config changes (see kafkaManager.watch). Initialized in InitConsumer.
	consumerMgr kafkaManager
)

type partitions []partitionInstance

// kafkaManager owns the set of active topic consumers, keyed by topic name.
// NOTE(review): partitionMap is mutated from the watch goroutine without a
// mutex — safe only while watch() is the sole writer; confirm.
type kafkaManager struct {
	partitionMap map[string]*partitionInstance
}

// newPartitionInstance builds a consumer instance for topic: it fetches the
// topic's partition list from the package-level consumer and wires up a
// cancellable context used later to stop consumption.
func newPartitionInstance(topic string) *partitionInstance {
	pi := &partitionInstance{topic: topic}
	var err error
	// Previously this error was silently discarded, leaving an instance that
	// consumes zero partitions with no trace of why. Log it instead.
	pi.partionList, err = consumer.Partitions(topic)
	if err != nil {
		logrus.Warnf("fetching partitions for topic %q: %v", topic, err)
	}
	pi.ctx, pi.cancel = context.WithCancel(context.Background())
	return pi
}

// updateConf reconciles the running consumers with a new collect
// configuration: it starts a consumer for every newly-added topic and cancels
// and removes consumers whose topic disappeared from the config.
func (k *kafkaManager) updateConf(collects collect.Collections) {
	// Collect the set of topics the new configuration wants, starting a
	// consumer for any topic we are not already consuming.
	wanted := make(map[string]struct{}, len(collects))
	for _, col := range collects {
		wanted[col.Topic] = struct{}{}
		if _, ok := k.partitionMap[col.Topic]; !ok {
			instance := newPartitionInstance(col.Topic)
			k.partitionMap[col.Topic] = instance
			go instance.consume()
		}
	}

	// Stop and drop consumers for topics no longer present in the new
	// configuration. (Set lookup replaces the previous O(n*m) nested scan.)
	for topic, instance := range k.partitionMap {
		if _, ok := wanted[topic]; !ok {
			instance.cancel()
			delete(k.partitionMap, topic)
		}
	}
}

// delConf stops every running topic consumer and resets the topic map,
// leaving the manager with no active consumers.
func (k *kafkaManager) delConf() {
	for _, instance := range k.partitionMap {
		instance.cancel()
	}
	k.partitionMap = map[string]*partitionInstance{}
}

// watch blocks on an etcd watch for key and applies configuration changes:
// a PUT re-reconciles the consumer set via updateConf, a DELETE tears all
// consumers down via delConf. Intended to run in its own goroutine.
func (k *kafkaManager) watch(key string) {
	watchChan := etcd.Cli.Watch(context.Background(), key)
	for wrsp := range watchChan {
		// A watch response can itself carry an error (e.g. compacted
		// revision); previously this was silently ignored.
		if err := wrsp.Err(); err != nil {
			logrus.Warning(err.Error())
			continue
		}
		for _, ev := range wrsp.Events {
			fmt.Printf("Type:%v Key:%s Value:%s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
			switch ev.Type {
			case mvccpb.PUT:
				var colls collect.Collections
				err := json.Unmarshal(ev.Kv.Value, &colls)
				if err != nil {
					logrus.Warning(err.Error())
					continue
				}
				k.updateConf(colls)
			case mvccpb.DELETE:
				k.delConf()
			}
		}
	}
}

// InitConsumer connects a sarama consumer to the given brokers, starts one
// consume goroutine per topic, and begins watching the etcd key for
// configuration changes.
func InitConsumer(addrs []string, topics []string, key string) (err error) {
	consumer, err = sarama.NewConsumer(addrs, nil)
	if err != nil {
		// Previously execution continued with a nil consumer, which made
		// newPartitions panic on consumer.Partitions. Fail fast instead.
		return err
	}
	consumerMgr = kafkaManager{
		partitionMap: make(map[string]*partitionInstance),
	}
	newPartitions(topics, key)
	return nil
}

func newPartitions(topics []string, key string) {
	//partitions := make([]partitionInstance, len(topics))
	for _, topic := range topics {
		var pi partitionInstance
		pi.partionList, _ = consumer.Partitions(topic)
		pi.topic = topic
		pi.ctx, pi.cancel = context.WithCancel(context.Background())
		consumerMgr.partitionMap[topic] = &pi
		go pi.consume()
	}
	go consumerMgr.watch(key)
}

// consume starts one goroutine per partition of p.topic that prints each
// received message, then blocks until p.ctx is cancelled. The deferred
// AsyncClose calls fire on return, shutting the partition consumers down.
func (p *partitionInstance) consume() {
	for _, partition := range p.partionList {
		pc, err := consumer.ConsumePartition(p.topic, partition, sarama.OffsetNewest)
		if err != nil {
			// Previously the error was discarded and a nil pc would panic
			// on AsyncClose below.
			logrus.Warnf("consuming topic %q partition %d: %v", p.topic, partition, err)
			continue
		}
		// Deferring in the loop is intentional: this function blocks until
		// ctx is done, so the closes fire exactly at shutdown.
		defer pc.AsyncClose()
		go func(partitionConsumer sarama.PartitionConsumer) {
			// Use the parameter (the original captured the loop's pc and
			// left partitionConsumer unused).
			for msg := range partitionConsumer.Messages() {
				fmt.Printf("Topic:%s Offset:%d Key:%v Value:%s\n", msg.Topic, msg.Offset, msg.Key, msg.Value)
			}
		}(pc)
	}
	// A single-case select is just a blocking receive.
	<-p.ctx.Done()
	fmt.Println("已关闭：", p.topic)
}

// InitConsumerGroup joins the "collect_log" consumer group on the given
// brokers with round-robin rebalancing and starts the (blocking) consume
// loop over topics.
func InitConsumerGroup(addrs []string, topics []string) (err error) {
	cfg := sarama.NewConfig()
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	if consumerGroup, err = sarama.NewConsumerGroup(addrs, "collect_log", cfg); err != nil {
		return err
	}
	run(topics)
	return nil
}

// run drives the consumer-group session loop: Consume blocks for the
// lifetime of a session and returns on every rebalance, so it must be
// re-invoked until the context is cancelled.
func run(topics []string) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // release the context's resources on any exit path
	// Renamed from `consumer` to avoid shadowing the package-level
	// sarama.Consumer variable of the same name.
	handler := &Consumer{
		ready: make(chan bool),
	}
	for {
		// Pass ctx (not context.Background()) so cancelling it actually
		// stops Consume; previously ctx.Err() could never become non-nil.
		if err := consumerGroup.Consume(ctx, topics, handler); err != nil {
			logrus.Panicf("consumer error from: %v", err)
		}
		if ctx.Err() != nil {
			return
		}
		// Recreate the ready channel: Setup closed it for this session.
		handler.ready = make(chan bool)
	}
}

// Consumer represents a Sarama consumer group consumer
type Consumer struct {
	// ready is closed by Setup once the session is established; run()
	// recreates it before each new session.
	ready chan bool
}

// Setup is run at the beginning of a new session, before ConsumeClaim.
// It signals readiness by closing the ready channel (closing, rather than
// sending, unblocks every waiter at once).
func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error {
	// Mark the consumer as ready
	close(consumer.ready)
	return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines
// have exited. No per-session state needs releasing here.
func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
	return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// It prints each message, throttles, and marks it consumed, until either the
// messages channel closes or the session context is cancelled.
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// NOTE:
	// Do not move the code below to a goroutine.
	// The `ConsumeClaim` itself is called within a goroutine, see:
	// https://github.com/Shopify/sarama/blob/main/consumer_group.go#L27-L29
	for {
		select {
		case message, ok := <-claim.Messages():
			// The channel is closed when the session ends; without the ok
			// check the zero-value nil message would panic below.
			if !ok {
				return nil
			}
			fmt.Printf(" Message claimed: value = %s, timestamp = %v, topic = %s\n", message.Value, message.Timestamp, message.Topic) //

			// NOTE(review): 1s sleep per message looks like a debug
			// throttle — confirm it is intentional before shipping.
			time.Sleep(time.Millisecond * 1000)
			session.MarkMessage(message, "")

		// Should return when `session.Context()` is done.
		// If not, will raise `ErrRebalanceInProgress` or `read tcp <ip>:<port>: i/o timeout` when kafka rebalance. see:
		// https://github.com/Shopify/sarama/issues/1192
		case <-session.Context().Done():
			return nil
		}
	}
}
