package mq

import (
	"context"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	"github.com/astaxie/beego/logs"
	"github.com/bsm/sarama-cluster"
)

// KafkaClusterMQ is a Kafka message-queue client. It lazily creates one
// consumer-group consumer per topic (or per app for multi-topic use) and
// holds a single producer that is either synchronous or asynchronous,
// chosen once at construction time via the async flag.
type KafkaClusterMQ struct {
	async          bool                   // true: aSyncProducer/producerMsg are used; false: syncProducer
	mux            *sync.RWMutex          // guards consumers, consumerBuf, consumerMsgBuf and producer swaps
	addrs          []string               // Kafka broker addresses
	config         *cluster.Config        // shared consumer config (error + notification channels enabled)
	consumers      []*cluster.Consumer    // every consumer ever created; closed in Destroy
	consumerBuf    map[string]chan string // topic -> channel of raw message values (ReceiveStringChan)
	consumerMsgBuf map[string]chan *Msg   // app -> channel of *Msg (MultiReceivTopic)
	closed         chan struct{}          // closed in Destroy to stop consumer goroutines
	groupIndex     string                 // consumer-group name prefix ("<groupIndex>_<topic|app>")
	syncProducer   sarama.SyncProducer    // non-nil only when async == false
	aSyncProducer  sarama.AsyncProducer   // non-nil only when async == true
	producerMsg    chan<- *sarama.ProducerMessage // input channel of aSyncProducer
}

// NewKafkaClusterMQ builds a KafkaClusterMQ connected to the given broker
// addresses. When async is true an AsyncProducer is created, otherwise a
// SyncProducer. Producer construction failure panics, so callers get a
// fully usable client or no client at all.
func NewKafkaClusterMQ(addrs []string, groupIndex string, async bool) *KafkaClusterMQ {
	cfg := cluster.NewConfig()
	cfg.Consumer.Return.Errors = true
	cfg.Group.Return.Notifications = true

	k := &KafkaClusterMQ{
		async:          async,
		addrs:          addrs,
		config:         cfg,
		groupIndex:     groupIndex,
		mux:            new(sync.RWMutex),
		closed:         make(chan struct{}),
		consumers:      make([]*cluster.Consumer, 0, 10),
		consumerMsgBuf: make(map[string]chan *Msg),
		consumerBuf:    make(map[string]chan string),
	}

	if async {
		producer, err := sarama.NewAsyncProducer(addrs, nil)
		if err != nil {
			panic(err)
		}
		k.aSyncProducer = producer
		k.producerMsg = producer.Input()
	} else {
		producer, err := sarama.NewSyncProducer(addrs, nil)
		if err != nil {
			panic(err)
		}
		k.syncProducer = producer
	}

	return k
}

// receiv creates a consumer-group consumer for a single topic and streams
// each message's value into the returned string channel. The consumer is
// registered in k.consumers so Destroy can close it.
//
// Fixes vs. previous version: consumer errors and rebalance notifications
// were received and silently discarded — they are now logged, matching
// multiReceiv. The send into buf is guarded by k.closed so the goroutine
// cannot stay blocked forever on an unread channel after shutdown.
func (k *KafkaClusterMQ) receiv(topic string) (chan string, error) {
	buf := make(chan string, 1)

	consumer, err := cluster.NewConsumer(k.addrs, k.groupIndex+"_"+topic, []string{topic}, k.config)
	if err != nil {
		return nil, err
	}

	k.mux.Lock()
	k.consumers = append(k.consumers, consumer)
	k.mux.Unlock()

	go func(buf chan<- string, consumer *cluster.Consumer) {
		for {
			select {
			case msg, more := <-consumer.Messages():
				if !more {
					return
				}
				select {
				case buf <- string(msg.Value):
				case <-k.closed:
					// Shutting down; the message is intentionally not
					// marked, so it will be redelivered next start.
					return
				}
				consumer.MarkOffset(msg, "") // mark message as processed
			case err, more := <-consumer.Errors():
				if !more {
					logs.Warn("Errors 管道关闭。退出")
					return
				}
				logs.Warn("Error: %s", err.Error())
			case ntf, more := <-consumer.Notifications():
				if !more {
					return
				}
				logs.Warn("Rebalanced: %+v", ntf)
			case <-k.closed:
				return
			}
		}
	}(buf, consumer)

	return buf, nil
}

// multiReceiv creates one consumer-group consumer subscribed to several
// topics for the given app and streams each message as a *Msg. The
// consumer is registered in k.consumers so Destroy can close it.
//
// Bug fix: a receive from a CLOSED channel fires on every select
// iteration. The previous code kept looping when Errors()/Notifications()
// closed (more == false), spinning a CPU core until k.closed fired. The
// goroutine now returns, as receiv already did. The send into msgChan is
// also guarded by k.closed so it cannot block past shutdown.
func (k *KafkaClusterMQ) multiReceiv(app string, topics []string) (msgChan chan *Msg, err error) {
	msgChan = make(chan *Msg, 1)
	consumer, err := cluster.NewConsumer(k.addrs, k.groupIndex+"_"+app, topics, k.config)
	if err != nil {
		return nil, err
	}

	k.mux.Lock()
	k.consumers = append(k.consumers, consumer)
	k.mux.Unlock()

	go func(msgChan chan<- *Msg, consumer *cluster.Consumer) {
		for {
			select {
			case msg, more := <-consumer.Messages():
				if !more {
					return
				}
				out := &Msg{
					Topic: msg.Topic,
					Value: string(msg.Value),
				}
				select {
				case msgChan <- out:
				case <-k.closed:
					// Shutting down; not marked, will be redelivered.
					return
				}
				consumer.MarkOffset(msg, "") // mark message as processed
			case err, more := <-consumer.Errors():
				if !more {
					return // channel closed; returning avoids a busy loop
				}
				logs.Warn("Error: %s", err.Error())
			case ntf, more := <-consumer.Notifications():
				if !more {
					return // channel closed; returning avoids a busy loop
				}
				logs.Warn("Rebalanced: %+v", ntf)
			case <-k.closed:
				return
			}
		}
	}(msgChan, consumer)
	return
}

// Destroy shuts the client down: it signals all consumer goroutines to
// stop, closes every registered consumer in parallel, then releases the
// producer.
//
// Fixes vs. previous version: k.consumers was read without the mutex (a
// race against receiv/multiReceiv); k.closed was closed only AFTER
// waiting for consumer.Close, so a goroutine blocked sending into an
// unread buffer channel could stall shutdown forever; and neither
// producer was ever closed (connection leak).
func (k *KafkaClusterMQ) Destroy() {
	// Signal goroutines first so none stays blocked on a channel send
	// while we wait for the consumers to close below.
	close(k.closed)

	k.mux.RLock()
	consumers := make([]*cluster.Consumer, len(k.consumers))
	copy(consumers, k.consumers)
	k.mux.RUnlock()

	if len(consumers) != 0 {
		wg := new(sync.WaitGroup)
		for _, consumer := range consumers {
			wg.Add(1)
			go func(consumer *cluster.Consumer) {
				defer wg.Done()
				if err := consumer.Close(); err != nil {
					logs.Warn("consumer close err:%v", err.Error())
				}
			}(consumer)
		}
		wg.Wait()
	}

	// Release the producer as well; it was previously leaked.
	k.mux.Lock()
	if k.aSyncProducer != nil {
		k.aSyncProducer.AsyncClose()
		k.aSyncProducer = nil
	}
	if k.syncProducer != nil {
		if err := k.syncProducer.Close(); err != nil {
			logs.Warn("producer close err:%v", err.Error())
		}
		k.syncProducer = nil
	}
	k.mux.Unlock()
}

// MultiReceivTopic returns a channel delivering messages for all topics
// under app, creating the underlying consumer on first use. Returns nil
// when the consumer cannot be created (the error is logged).
//
// Bug fix: the previous check-then-act (RUnlock, then Lock) let two
// concurrent callers both create a consumer for the same app; the second
// silently overwrote the first's channel in the map, stranding existing
// readers. We now re-check under the write lock and keep the first entry.
func (k *KafkaClusterMQ) MultiReceivTopic(app string, topics []string) <-chan *Msg {
	k.mux.RLock()
	if msgChan, ok := k.consumerMsgBuf[app]; ok {
		k.mux.RUnlock()
		return msgChan
	}
	k.mux.RUnlock()

	msgChan, err := k.multiReceiv(app, topics)
	if err != nil {
		logs.Warn("MultiReceivTopic app:%s topic:%v err:%s", app, topics, err.Error())
		return nil
	}

	k.mux.Lock()
	if existing, ok := k.consumerMsgBuf[app]; ok {
		// Lost the race: another goroutine registered first. Return its
		// channel; our redundant consumer stays in k.consumers and is
		// closed by Destroy.
		k.mux.Unlock()
		return existing
	}
	k.consumerMsgBuf[app] = msgChan
	k.mux.Unlock()
	return msgChan
}

// ReceiveStringChan returns a channel of raw message values for topic,
// creating the underlying consumer on first use. Returns nil when the
// consumer cannot be created (the error is logged).
//
// Bug fix: same check-then-act race as MultiReceivTopic — two concurrent
// callers could both create consumers and the second would overwrite the
// first's channel in the map. We now re-check under the write lock.
func (k *KafkaClusterMQ) ReceiveStringChan(topic string) <-chan string {
	k.mux.RLock()
	if bufChan, ok := k.consumerBuf[topic]; ok {
		k.mux.RUnlock()
		return bufChan
	}
	k.mux.RUnlock()

	bufChan, err := k.receiv(topic)
	if err != nil {
		logs.Warn("ReceiveStringChan topic:%s err:%s", topic, err.Error())
		return nil
	}

	k.mux.Lock()
	if existing, ok := k.consumerBuf[topic]; ok {
		// Lost the race: keep the first registration; our redundant
		// consumer is closed by Destroy.
		k.mux.Unlock()
		return existing
	}
	k.consumerBuf[topic] = bufChan
	k.mux.Unlock()
	return bufChan
}

// newSyncProducer (re)creates the synchronous producer against k.addrs,
// storing the result (nil on failure) and returning the creation error.
func (k *KafkaClusterMQ) newSyncProducer() error {
	producer, err := sarama.NewSyncProducer(k.addrs, nil)
	k.syncProducer = producer
	return err
}

// newASyncProducer lazily (re)creates the asynchronous producer and
// refreshes the cached input channel. It is a no-op when a producer
// already exists.
func (k *KafkaClusterMQ) newASyncProducer() error {
	if k.aSyncProducer != nil {
		return nil
	}
	producer, err := sarama.NewAsyncProducer(k.addrs, nil)
	if err != nil {
		return err
	}
	k.aSyncProducer = producer
	k.producerMsg = producer.Input()
	return nil
}

// SendString publishes msg to topic. In async mode it enqueues on the
// async producer's input channel with a 10s timeout; on timeout the
// producer is assumed wedged, is torn down and recreated, and the send is
// retried once. In sync mode it blocks until the broker acknowledges.
//
// Fixes vs. previous version: the context's cancel func was discarded
// (vet "lostcancel" — timer resources held for the full 10s even after a
// successful send), and the error from newASyncProducer was ignored, so
// a failed recreation led to a send on a nil channel that blocked forever
// while holding the mutex.
func (k *KafkaClusterMQ) SendString(topic string, msg string) (err error) {
	kafkaMsg := &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder(msg)}
	k.mux.Lock()
	defer k.mux.Unlock()
	if k.async {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		select {
		case k.producerMsg <- kafkaMsg:
		case <-ctx.Done():
			// The input channel did not accept the message within the
			// timeout; recycle the producer and retry once.
			k.aSyncProducer.AsyncClose()
			k.aSyncProducer = nil
			if err = k.newASyncProducer(); err != nil {
				return
			}
			k.producerMsg <- kafkaMsg
		}
	} else {
		_, _, err = k.syncProducer.SendMessage(kafkaMsg)
	}

	return
}
