package adapter

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"path"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	"github.com/samuel/go-zookeeper/zk"
	"github.com/sirupsen/logrus"
)

// Kafka is a message-queue adapter backed by Apache Kafka. Broker addresses
// are not configured directly; they are discovered at call time by reading
// the broker registry out of ZooKeeper (see getBrokerList).
type Kafka struct {
	// ZooKeeper hosts used for broker discovery.
	zkHosts      []string
	// Shared sarama configuration applied to every producer and consumer
	// created through this adapter.
	saramaConfig *sarama.Config
	// All async producers created via NewProducer; Close shuts them down.
	producerList []sarama.AsyncProducer
	// NOTE(review): mu is never locked anywhere in this file, yet
	// producerList is appended to in NewProducer and iterated in Close —
	// presumably mu was meant to guard producerList; confirm and use it.
	mu           sync.Mutex
	// base supplies ctx/cancel lifecycle management (defined elsewhere).
	base
}

// Close shuts down every producer created through this adapter and then
// cancels the adapter context, which stops any consumer goroutines started
// by NewConsumer. It returns the combined errors from all producer Close
// calls, or nil if none failed.
func (q *Kafka) Close() error {
	// BUG FIX: producerList is appended to by NewProducer; guard the
	// iteration with the (previously unused) mutex to avoid a data race.
	q.mu.Lock()
	defer q.mu.Unlock()

	// BUG FIX: the original kept only the last error, silently dropping
	// earlier failures. Collect them all and join.
	var errs []error
	for _, conn := range q.producerList {
		if e := conn.Close(); e != nil {
			errs = append(errs, e)
		}
	}
	q.cancel()
	return errors.Join(errs...)
}

// NewKafkaConfig returns a default sarama configuration with the protocol
// version pinned to Kafka 0.11.0.0.
func NewKafkaConfig() *sarama.Config {
	config := sarama.NewConfig()
	config.Version = sarama.V0_11_0_0
	return config
}

// NewKafka builds a Kafka adapter that will discover brokers through the
// given ZooKeeper hosts and apply config to every connection it opens.
func NewKafka(zkHosts []string, config *sarama.Config) (q *Kafka, err error) {
	adapter := &Kafka{
		zkHosts:      zkHosts,
		saramaConfig: config,
	}
	// Set up the embedded base (context/cancel lifecycle).
	adapter.init()
	return adapter, nil
}

// getBrokerList connects to ZooKeeper, reads the registered broker nodes
// under /brokers/ids, and returns their host:port addresses. The ZooKeeper
// connection is closed before returning. If no brokers are registered the
// returned slice is nil with a nil error.
func (q *Kafka) getBrokerList() (brokerList []string, err error) {
	defer func() {
		if err != nil {
			// BUG FIX: the original separated the message from the wrapped
			// error with a bare carriage return ("\r"), which mangles log
			// output; use the idiomatic ": %w" separator instead.
			err = fmt.Errorf("get broker list error: %w", err)
		}
	}()

	conn, _, err := zk.Connect(q.zkHosts, time.Second*5)
	if err != nil {
		return
	}
	defer conn.Close()

	ids, _, err := conn.Children("/brokers/ids")
	if err != nil {
		return
	}

	// Shape of the JSON payload stored per broker id in ZooKeeper; only the
	// address fields are needed here.
	type KafkaNode struct {
		Host string
		Port int
	}

	var data []byte
	for _, id := range ids {
		data, _, err = conn.Get(path.Join("/brokers/ids", id))
		if err != nil {
			return
		}
		node := &KafkaNode{}
		err = json.Unmarshal(data, node)
		if err != nil {
			return
		}
		brokerList = append(brokerList, fmt.Sprintf("%s:%d", node.Host, node.Port))
	}

	return
}

// NewProducer creates an async producer connected to the brokers currently
// registered in ZooKeeper. The underlying connection is tracked in
// producerList so that Close can shut it down later.
func (q *Kafka) NewProducer() (p Producer, err error) {
	defer func() {
		if err != nil {
			// BUG FIX: replace the stray "\r" separator with ": %w".
			err = fmt.Errorf("can't create Producer: %w", err)
		}
	}()

	brokerList, err := q.getBrokerList()
	if err != nil {
		return
	}

	producer := &ProducerKafka{
		core: q,
	}
	producer.conn, err = sarama.NewAsyncProducer(brokerList, q.saramaConfig)
	if err != nil {
		return
	}

	// BUG FIX: producerList is also iterated by Close; guard the append
	// with the mutex to avoid a data race.
	q.mu.Lock()
	q.producerList = append(q.producerList, producer.conn)
	q.mu.Unlock()

	p = producer
	return
}

// ProducerKafka wraps a sarama async producer created by Kafka.NewProducer.
type ProducerKafka struct {
	// The adapter that created this producer.
	core *Kafka
	// The underlying async producer connection; closed by Kafka.Close.
	conn sarama.AsyncProducer
}

// Errors exposes the underlying producer's error channel; callers should
// drain it to observe delivery failures.
func (p *ProducerKafka) Errors() <-chan *sarama.ProducerError {
	return p.conn.Errors()
}

// Input exposes the underlying producer's input channel; messages sent on
// it are published asynchronously.
func (p *ProducerKafka) Input() chan<- *sarama.ProducerMessage {
	return p.conn.Input()
}

// ConsumerKafka delivers messages received by a consumer-group session
// (see KafkaConsumerGroupHandlerImp.ConsumeClaim) to callers via a
// buffered channel.
type ConsumerKafka struct {
	// The adapter that created this consumer; its ctx stops delivery.
	core       *Kafka
	// Buffered (capacity 10) channel fed by ConsumeClaim and drained by
	// Get/GetBatch.
	outputChan chan *sarama.ConsumerMessage
}

// NewConsumer joins the consumer group groupId on the given topic and
// returns a Consumer whose channel is fed by a background consume loop.
// The loop runs until the adapter context is cancelled or a consume error
// occurs. NewConsumer blocks until the first consumer session is set up.
func (q *Kafka) NewConsumer(topic, groupId string) (c Consumer, err error) {
	brokerList, err := q.getBrokerList()
	if err != nil {
		return
	}
	c_ := &ConsumerKafka{
		core:       q,
		outputChan: make(chan *sarama.ConsumerMessage, 10),
	}

	// NOTE(review): this group client is never closed anywhere — Close only
	// shuts down producers. Consider tracking it for cleanup.
	client, err := sarama.NewConsumerGroup(brokerList, groupId, q.saramaConfig)
	if err != nil {
		return
	}
	topics := []string{topic}
	consumer := &KafkaConsumerGroupHandlerImp{
		c:     c_,
		ready: make(chan bool),
	}

	go func() {
		for {
			if q.ctx.Err() != nil {
				return
			}

			// `Consume` should be called inside an infinite loop: when a
			// server-side rebalance happens, the consumer session needs to
			// be recreated to get the new claims.
			//
			// BUG FIX: use a goroutine-local error variable here. The
			// original assigned to the function's named return `err`,
			// which races with the caller reading the returned value
			// after NewConsumer has already returned.
			if consumeErr := client.Consume(q.ctx, topics, consumer); consumeErr != nil {
				logrus.Errorf("[MsgQueue]Error from consumer: %v", consumeErr)
				return
			}
			// BUG FIX: actually perform the context check the original
			// comment promised, so a cancelled adapter stops the loop
			// instead of calling Consume again.
			if q.ctx.Err() != nil {
				return
			}

			// Re-arm the ready channel for the next session's Setup.
			consumer.ready = make(chan bool)
		}
	}()

	// Await till the consumer has been set up.
	// NOTE(review): if Consume fails before Setup ever runs, this receive
	// blocks forever; consider also selecting on q.ctx.Done() here.
	<-consumer.ready
	logrus.Info(fmt.Sprintf("[MsgQueue]topic(%v), group(%s) is running!...", topics, groupId))
	c = c_

	return
}

// KafkaConsumerGroupHandlerImp represents a Sarama consumer group consumer.
// It implements sarama.ConsumerGroupHandler: messages claimed by a session
// are forwarded to the wrapped ConsumerKafka's output channel.
type KafkaConsumerGroupHandlerImp struct {
	// Destination for consumed messages.
	c     *ConsumerKafka
	// Closed by Setup to signal that the session is established; recreated
	// by the consume loop in NewConsumer after each rebalance.
	ready chan bool
}

// Setup is run at the beginning of a new session, before ConsumeClaim.
// Closing ready unblocks the NewConsumer goroutine waiting for the first
// session to be established.
func (c *KafkaConsumerGroupHandlerImp) Setup(sarama.ConsumerGroupSession) error {
	// Mark the consumer as ready
	close(c.ready)
	return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines
// have exited. No per-session state needs releasing here.
func (c *KafkaConsumerGroupHandlerImp) Cleanup(sarama.ConsumerGroupSession) error {
	return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Each claimed message is forwarded to the output channel and then marked as
// consumed; the loop ends when the claim's channel closes or the adapter
// context is cancelled.
func (c *KafkaConsumerGroupHandlerImp) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// NOTE:
	// Do not move the code below to a goroutine.
	// The `ConsumeClaim` itself is called within a goroutine, see:
	// https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29
	for {
		msg, ok := <-claim.Messages()
		if !ok {
			// Claim channel closed (session ending / rebalance).
			return nil
		}
		select {
		case <-c.c.core.ctx.Done():
			return nil
		case c.c.outputChan <- msg:
		}
		session.MarkMessage(msg, "")
	}
}

// Get returns the receive-only channel on which consumed messages are
// delivered.
func (c *ConsumerKafka) Get() <-chan *sarama.ConsumerMessage {
	return c.outputChan
}

// GetBatch blocks collecting up to batch messages from the consumer. It
// returns early — possibly with fewer messages, or a nil slice — when
// either the supplied ctx or the adapter's own context is cancelled.
func (c *ConsumerKafka) GetBatch(ctx context.Context, batch int) (list []*sarama.ConsumerMessage) {
	for received := 0; received < batch; received++ {
		select {
		case <-c.core.ctx.Done():
			return list
		case <-ctx.Done():
			return list
		case msg := <-c.outputChan:
			list = append(list, msg)
		}
	}
	return list
}
