package mq

import (
	"context"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	saramaCluster "github.com/bsm/sarama-cluster"
	"github.com/shirou/gopsutil/cpu"
	"github.com/shirou/gopsutil/mem"
	"github.com/sirupsen/logrus"
)

// Consumer wraps a sarama-cluster consumer together with the connection
// parameters it was created with, so auxiliary clients (e.g. the offset
// monitor) can dial the same brokers with the same SASL credentials.
type Consumer struct {
	Brokers       []string // Kafka broker addresses
	Topics        []string // topics this consumer is subscribed to
	ConsumerGroup string   // consumer group id
	Consumer      *saramaCluster.Consumer
	SaslUserName  string // SASL user; empty when SASL is disabled
	SaslPasswd    string // SASL password; empty when SASL is disabled
}

// PartitionOffset tracks consumption progress of a single partition.
type PartitionOffset struct {
	OffsetConsumed int64 // last offset marked consumed; 0 means nothing consumed yet
	OffsetNewest   int64 // newest offset reported by the broker
	Owned          bool  // set false when the partition is released in a rebalance
}

// TopicOffset holds the per-partition offset state of one topic.
type TopicOffset struct {
	Partitions map[int32]*PartitionOffset
}

// OffsetManager maps topic names to their per-partition offset state.
type OffsetManager struct {
	TopicMap map[string]*TopicOffset
}

// Message is this package's alias for a sarama consumer message.
type Message sarama.ConsumerMessage

// callbackFunc is an untyped callback; chanCallback accepts either
// func(Message) or func(Message, int, int).
type callbackFunc interface{}

var (
	// insConsumer and onceConsumer implement the NewKafkaConsumer singleton.
	insConsumer           *Consumer
	onceConsumer          sync.Once
	// ConsumerOffsetManager holds the offset state shared by the consume
	// loop, MarkOffset and the lag monitor.
	// NOTE(review): its maps are read and written from multiple goroutines
	// without locking — confirm whether a mutex is needed.
	ConsumerOffsetManager = OffsetManager{
		TopicMap: make(map[string]*TopicOffset),
	}
	// OffsetAlertThreshold is the consumer lag (newest - consumed) above
	// which a partition is included in the monitor's report.
	OffsetAlertThreshold = 1000
)

// NewKafkaConsumer lazily builds the process-wide singleton consumer.
// Only the arguments of the first call take effect; subsequent calls
// return the already-initialized instance. Panics when the cluster
// consumer cannot be created.
func NewKafkaConsumer(brokers []string, consumeGroup, user, passwd string, topics []string) *Consumer {
	onceConsumer.Do(func() {
		cfg := initClusterConfig()
		// Enable SASL auth only when a password was supplied.
		if passwd != "" {
			cfg.Net.SASL.Enable = true
			cfg.Net.SASL.User = user
			cfg.Net.SASL.Password = passwd
		}
		clusterConsumer, err := saramaCluster.NewConsumer(brokers, consumeGroup, topics, cfg)
		if err != nil {
			panic(err)
		}
		insConsumer = &Consumer{
			Brokers:       brokers,
			Topics:        topics,
			ConsumerGroup: consumeGroup,
			Consumer:      clusterConsumer,
			SaslUserName:  user,
			SaslPasswd:    passwd,
		}
	})
	return insConsumer
}

// consumeOffsetMonitor periodically polls the newest broker offset of every
// consumed topic/partition and logs a report for partitions whose consumer
// lag exceeds OffsetAlertThreshold. It never returns.
//
// NOTE(review): ConsumerOffsetManager is also touched by consume/MarkOffset
// from other goroutines without locking — confirm whether a mutex is needed.
func (c *Consumer) consumeOffsetMonitor() {
	mgr := &ConsumerOffsetManager

	config := sarama.NewConfig()
	if c.SaslPasswd != "" {
		config.Net.SASL.Enable = true
		config.Net.SASL.User = c.SaslUserName
		config.Net.SASL.Password = c.SaslPasswd
	}
	client, err := sarama.NewClient(c.Brokers, config)
	if err != nil {
		// Never use err.Error() as the format string: it may contain '%'
		// verbs. Pass the error as a formatting argument instead.
		logrus.Panicf("failed to start offset monitor, brokers %v, err: %v", c.Brokers, err)
	}
	defer client.Close()

	for {
		report := make(map[string][]string)
		for _, topic := range c.Topics {
			topicOffset, ok := mgr.TopicMap[topic]
			if !ok {
				topicOffset = &TopicOffset{
					Partitions: make(map[int32]*PartitionOffset),
				}
				mgr.TopicMap[topic] = topicOffset
			}
			partitions, err := client.Partitions(topic)
			if err != nil {
				logrus.Errorf("failed to get partitions of topic %s: %v", topic, err)
				continue
			}
			report[topic] = nil
			for _, partition := range partitions {
				offset, err := client.GetOffset(topic, partition, sarama.OffsetNewest)
				if err != nil {
					// Previously ignored; skip the partition on failure so we
					// never record a bogus newest offset.
					logrus.Errorf("failed to get newest offset of %s/%d: %v", topic, partition, err)
					continue
				}
				partitionOffset, ok := topicOffset.Partitions[partition]
				if !ok {
					partitionOffset = &PartitionOffset{
						OffsetNewest:   0,
						OffsetConsumed: 0,
						Owned:          true,
					}
					topicOffset.Partitions[partition] = partitionOffset
				}
				// Skip partitions released to other group members.
				if !partitionOffset.Owned {
					continue
				}
				partitionOffset.OffsetNewest = offset
				// OffsetConsumed == 0 means nothing consumed yet; lag unknown.
				if partitionOffset.OffsetConsumed == 0 {
					continue
				}
				lag := offset - partitionOffset.OffsetConsumed
				if lag > int64(OffsetAlertThreshold) {
					report[topic] = append(report[topic], fmt.Sprintf("%d: %d", partition, lag))
				}
			}
		}
		for topic, subReport := range report {
			if len(subReport) > 0 {
				logrus.Info(" offset report, topic:", topic, ",subReport", subReport)
			}
		}
		// Random jitter between polls; +1 guarantees at least one second of
		// sleep (rand.Intn(60) may return 0, which would busy-loop).
		s := rand.Intn(60) + 1
		logrus.Info("sleep time:", s)
		time.Sleep(time.Second * time.Duration(s))
	}
}

// CallbackConsumer starts the offset monitor plus `parallel` fetch
// goroutines, each feeding `callParallel` callback workers built by
// chanCallback. The returned CancelFunc stops the fetch loops.
func (c *Consumer) CallbackConsumer(cb callbackFunc, parallel, callParallel int) context.CancelFunc {
	go c.consumeOffsetMonitor()
	ctx, cancel := context.WithCancel(context.Background())
	for i := 0; i < parallel; i++ {
		fetchIdx := i // capture per-iteration value for the goroutine
		go func() {
			c.consume(c.chanCallback(cb, callParallel, fetchIdx), ctx, fetchIdx)
		}()
	}
	return cancel
}

// ResourceControl reports whether consumption should be throttled because
// host resources are scarce: memory usage above 80% or average CPU usage
// above 90%. On measurement errors it returns true (fail safe: back off).
func (c *Consumer) ResourceControl() bool {
	v, err := mem.VirtualMemory()
	if err != nil {
		logrus.Error(err.Error())
		return true
	}

	percent, err := cpu.Percent(time.Second, false)
	if err != nil {
		logrus.Error(err.Error())
		return true
	}
	// Guard against an empty sample set — the original int(sum)/len(percent)
	// would panic with a divide-by-zero.
	if len(percent) == 0 {
		return true
	}
	sum := float64(0)
	for _, per := range percent {
		sum += per
	}
	// Use floating-point division; integer division truncated the average
	// (e.g. 90.9% averaged to 90 and never tripped the > 90 check).
	avg := sum / float64(len(percent))
	return v.UsedPercent > 80 || avg > 90
}

// consume drains messages, errors and rebalance notifications from the
// shared cluster consumer, forwarding each message to msgs and recording
// its offset for the lag monitor. It returns when ctx is canceled.
//
// NOTE(review): all `parallel` fetch goroutines share c.Consumer and each
// defers Close on it — the first goroutine to return closes the consumer
// out from under the others; confirm this is intended.
func (c *Consumer) consume(
	msgs chan<- Message,
	ctx context.Context,
	index int) {

	consumer := c.Consumer
	defer consumer.Close()
	logrus.Info("kafka consumer.", "index:", index, " topics:", c.Topics,
		" consumer group:", c.ConsumerGroup)

	for {
		// Resource control module: back off while host CPU/memory are scarce.
		if c.ResourceControl() {
			time.Sleep(2 * time.Second)
			continue
		}
		select {
		case msg, more := <-consumer.Messages():
			if more {
				msgs <- Message(*msg)
				// Mark the offset both in the cluster consumer (for commit)
				// and in the package-level manager (for lag reporting).
				consumer.MarkOffset(msg, "")
				c.MarkOffset(msg.Topic, msg.Partition, msg.Offset)
			}
		case err, more := <-consumer.Errors():
			if more {
				logrus.Error("consume error", err.Error())
			}
		case ntf, more := <-consumer.Notifications():
			if more {
				logrus.Info("notify rebalanced. index:", index, " current:", ntf.Current)
				// remove offset record of those released partitions
				mgr := &ConsumerOffsetManager
				for _topic, _partitions := range ntf.Released {
					topicOffset, ok := mgr.TopicMap[_topic]
					if !ok {
						continue
					}
					for _, _partition := range _partitions {
						partitionOffset, ok := topicOffset.Partitions[_partition]
						if ok {
							// Flag as not owned so the monitor stops
							// reporting lag for released partitions.
							partitionOffset.Owned = false
						}
					}
				}
			}
		case <-ctx.Done():
			return
		}
	}
}

// MarkOffset records the latest consumed offset of topic/partition in the
// package-level ConsumerOffsetManager so the lag monitor can compare it
// against the newest broker offset. Unknown topics or partitions are
// ignored — the monitor is responsible for creating those entries.
func (c *Consumer) MarkOffset(topic string, partition int32, offset int64) {
	topicOffset, ok := ConsumerOffsetManager.TopicMap[topic]
	if !ok {
		return
	}
	if po, found := topicOffset.Partitions[partition]; found {
		po.OffsetConsumed = offset
		po.Owned = true
	}
}

// chanCallback spawns `parallel` worker goroutines that drain the returned
// channel and invoke callback on every message. Supported callback shapes:
//
//	func(Message)            — message only
//	func(Message, int, int)  — message, fetch index, worker index
//
// Any other shape panics immediately (fail fast), instead of panicking
// inside a worker only when the first message arrives.
//
// NOTE(review): the returned channel is never closed, so the workers live
// for the lifetime of the process — confirm this is intended.
func (c *Consumer) chanCallback(callback callbackFunc, parallel int, fetchIndex int) chan<- Message {
	// Resolve the callback shape once, not on every message.
	simpleCallback, isSimple := callback.(func(Message))
	indexCallback, isIndexed := callback.(func(Message, int, int))
	if !isSimple && !isIndexed {
		logrus.Panic("callback define error")
	}

	msgs := make(chan Message, parallel)
	for i := 0; i < parallel; i++ {
		consumeIndex := i // capture per-iteration value for the goroutine
		go func() {
			for msg := range msgs {
				if isSimple {
					simpleCallback(msg)
				} else {
					indexCallback(msg, fetchIndex, consumeIndex)
				}
			}
		}()
	}
	return msgs
}

// initClusterConfig builds the sarama-cluster consumer configuration:
// error and rebalance-notification channels enabled, protocol pinned to
// Kafka 0.10.1.0, 5s TCP keep-alive and a large channel buffer.
func initClusterConfig() *saramaCluster.Config {
	cfg := saramaCluster.NewConfig()
	cfg.Version = sarama.V0_10_1_0
	cfg.Consumer.Return.Errors = true
	cfg.Group.Return.Notifications = true
	// Uncomment to start new consumer groups from the earliest offset:
	//cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	cfg.Net.KeepAlive = time.Second * 5
	cfg.ChannelBufferSize = 1024 * 1024 * 4
	return cfg
}
