package kafka

import (
	"context"
	"github.com/Shopify/sarama"
	"log"
	"strings"
	"sync"
	"time"
)

var (
	// version is the Kafka broker protocol version string fed to
	// sarama.ParseKafkaVersion in InitConsume.
	version  = "0.10.2.1"
	// assignor selects the partition assignment strategy:
	// "range", "roundrobin", or "sticky".
	assignor = "range"
	// oldest expresses whether a consumer group with no committed offset
	// should start from the oldest available message.
	oldest   = true
	// verbose toggles verbose logging. NOTE(review): not referenced anywhere
	// in this file — confirm external use or remove.
	verbose  = false
)

// DataChan is the record handed to consumers of the data channel: a copy of
// the interesting fields of a sarama.ConsumerMessage.
type DataChan struct {
	Timestamp      time.Time // only set if kafka is version 0.10+, inner message timestamp
	BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp

	Key, Value []byte // raw message key and payload
	Topic      string // topic the message was read from
	Partition  int32  // partition the message was read from
	Offset     int64  // offset of the message within the partition
}

// Consumer represents a Sarama consumer group consumer.
// Messages claimed by ConsumeClaim are forwarded on dataChan.
type Consumer struct {
	ready    chan bool     // closed by Setup to signal the session is established
	dataChan chan DataChan // destination for every consumed record
}

// Setup is run at the beginning of a new session, before ConsumeClaim.
// Closing ready unblocks InitConsume, which waits on it; the consume loop in
// InitConsume re-creates the channel after each session so the close here
// does not panic on rebalance.
func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error {
	// Mark the consumer as ready
	close(consumer.ready)
	return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines
// have exited. No per-session state is held, so there is nothing to release.
func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
	return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Each claimed message is copied into a DataChan value, forwarded on
// consumer.dataChan, and only then marked as consumed, so an undelivered
// message is never committed.
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {

	// NOTE:
	// Do not move the code below to a goroutine.
	// The `ConsumeClaim` itself is called within a goroutine, see:
	// https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29
	for {
		select {
		case message, ok := <-claim.Messages():
			if !ok {
				// Messages channel closed: the claim has ended
				// (rebalance or shutdown).
				return nil
			}
			// Forward the record, but keep watching the session context so a
			// blocked send on dataChan cannot stall a rebalance or shutdown
			// indefinitely (the previous unconditional send could deadlock).
			select {
			case consumer.dataChan <- DataChan{
				Value:          message.Value,
				Timestamp:      message.Timestamp,
				BlockTimestamp: message.BlockTimestamp, // previously declared in DataChan but never populated
				Key:            message.Key,
				Partition:      message.Partition,
				Topic:          message.Topic,
				Offset:         message.Offset,
			}:
				session.MarkMessage(message, "")
			case <-session.Context().Done():
				return nil
			}
		case <-session.Context().Done():
			// The session ended; returning promptly lets the group rebalance.
			return nil
		}
	}
}
func init()  {
	log.SetFlags(log.Lmicroseconds|log.Lshortfile)
}

// InitConsume creates a SASL-authenticated Sarama consumer group, joins the
// comma-separated topics, and forwards every consumed record to dataChan.
// It blocks until ctx is cancelled, then waits for the consume loop to exit
// and closes the client.
//
// Unrecoverable setup or consume errors are reported via log.Panicf,
// matching the error style of the rest of this file.
func InitConsume(ctx context.Context, topics, group, user, password string, brokers []string, dataChan chan DataChan) {
	log.Println("Starting a new Sarama consumer")

	//sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)

	// Use a distinct local name instead of shadowing the package-level
	// `version` string.
	kafkaVersion, err := sarama.ParseKafkaVersion(version)
	if err != nil {
		log.Panicf("Error parsing Kafka version: %v", err)
	}

	config := sarama.NewConfig()
	config.Version = kafkaVersion
	config.Net.SASL.Enable = true
	config.Net.SASL.User = user
	config.Net.SASL.Password = password
	config.Net.SASL.Handshake = true

	// Partition assignment strategy, driven by the package-level setting.
	switch assignor {
	case "sticky":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
	case "roundrobin":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	case "range":
		config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
	default:
		log.Panicf("Unrecognized consumer group partition assignor: %s", assignor)
	}

	// Honor the package-level `oldest` flag; previously it was declared but
	// ignored and the initial offset was hard-coded to OffsetNewest.
	if oldest {
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	} else {
		config.Consumer.Offsets.Initial = sarama.OffsetNewest
	}

	/**
	 * Setup a new Sarama consumer group
	 */
	consumer := Consumer{
		ready:    make(chan bool),
		dataChan: dataChan,
	}

	client, err := sarama.NewConsumerGroup(brokers, group, config)
	if err != nil {
		log.Panicf("Error creating consumer group client: %v", err)
	}

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			// `Consume` should be called inside an infinite loop: when a
			// server-side rebalance happens, the consumer session needs to
			// be recreated to get the new claims.
			if err := client.Consume(ctx, strings.Split(topics, ","), &consumer); err != nil {
				log.Panicf("Error from consumer: %v", err)
			}
			// Context cancelled — the consumer should stop.
			if ctx.Err() != nil {
				return
			}
			// Re-arm the readiness channel for the next session's Setup.
			consumer.ready = make(chan bool)
		}
	}()

	<-consumer.ready // Await till the consumer has been set up
	log.Println("Sarama consumer up and running!...")

	// Block until the caller cancels the context
	// (plain receive replaces the previous single-case select).
	<-ctx.Done()
	log.Println("consume done...")

	wg.Wait()
	// The Close error was previously assigned and never inspected.
	if err := client.Close(); err != nil {
		log.Printf("Error closing consumer group client: %v", err)
	}
	log.Println("consume client closed ")

}

