package glog2

import (
	"context"
	"encoding/json"
	"fmt"
	"github.com/Shopify/sarama"
	"github.com/go-zookeeper/zk"
	"github.com/sirupsen/logrus"
	"time"
)

// logrusFormatter renders entries as JSON with ELK-friendly field names
// (@timestamp / LogLevel / message / caller) for the "logstash" topic.
// NOTE(review): "+08:00" is not a Go reference-time layout token
// (that would be "-07:00"), so it is emitted literally — timestamps always
// claim UTC+8 regardless of the entry's actual zone. Confirm this is
// intentional for the downstream consumer before changing it.
var logrusFormatter = &logrus.JSONFormatter{
	TimestampFormat: "2006-01-02T15:04:05.999+08:00",
	FieldMap: logrus.FieldMap{
		logrus.FieldKeyTime:  "@timestamp",
		logrus.FieldKeyLevel: "LogLevel",
		logrus.FieldKeyMsg:   "message",
		logrus.FieldKeyFunc:  "caller",
	},
}

// moduleKafka ships logrus entries to kafka. Brokers are discovered through
// zookeeper, and the producer is rebuilt whenever broker membership changes.
// All fields are owned by the single mainLoop goroutine; errMainLoop acts as
// a sticky per-session error that unwinds the loop back to a retry.
type moduleKafka struct {
	zkHosts        []string                // zookeeper addresses used for broker discovery
	logCh          <-chan *logrus.Entry    // incoming log entries to forward
	zkConn         *zk.Conn                // live zookeeper session owned by watchBrokers
	brokerChangeCh <-chan zk.Event         // one-shot watch on the broker id node
	errMainLoop    error                   // error of the current session; non-nil ends it
	kafkaProducer  sarama.AsyncProducer    // current producer; nil until connected
	ctx            context.Context         // module lifetime; cancelled by close()
	cancel         context.CancelFunc      // cancels ctx
	logMsgToSend   *sarama.ProducerMessage // staged message awaiting a successful Input() send
}

// newModuleKafka builds a kafka log sink fed by logCh, discovering brokers
// through the given zookeeper hosts. The module is inert until start() is
// called; close() cancels the context derived from ctx.
func newModuleKafka(zkHosts []string, logCh <-chan *logrus.Entry, ctx context.Context) *moduleKafka {
	childCtx, cancel := context.WithCancel(ctx)
	return &moduleKafka{
		zkHosts: zkHosts,
		logCh:   logCh,
		ctx:     childCtx,
		cancel:  cancel,
	}
}
// start launches the background send loop and returns immediately.
// The goroutine exits once close() cancels k.ctx.
func (k *moduleKafka) start() {
	go k.mainLoop()
}

// mainLoop re-runs watchBrokers until the module context is cancelled; each
// iteration is one full connect/watch/send session (watchBrokers sleeps on
// failure, so this does not spin).
func (k *moduleKafka) mainLoop() {
	for {
		if k.ctx.Err() != nil {
			return
		}
		k.watchBrokers()
	}
}
// watchBrokers runs one full session: connect to zookeeper, set a child
// watch on the broker id node, connect a kafka producer, then pump events
// until the context is cancelled or k.errMainLoop becomes non-nil. Any error
// (or panic) is logged by the first deferred handler, followed by a 2-second
// backoff before mainLoop retries with a fresh session.
func (k *moduleKafka) watchBrokers() {
	// Report the session outcome and throttle mainLoop's retry.
	// recover only works in a directly deferred function, so the panic
	// handler must live here rather than in mainLoop.
	defer func() {
		if p := recover(); p != nil {
			logrus.Errorf("%s 未知错误 %s", pkgTag, p) // "unknown error"
			time.Sleep(time.Second * 2)
			return
		}

		if k.errMainLoop != nil {
			logrus.Errorf("%s 错误：\n%s", pkgTag, k.errMainLoop) // "error"
			time.Sleep(time.Second * 2)
			return
		}
	}()

	// Session timeout was originally time.Second*10 — kept at 2s.
	k.zkConn, _, k.errMainLoop = zk.Connect(k.zkHosts, time.Second*2) //*10)
	if k.errMainLoop != nil {
		k.errMainLoop = fmt.Errorf("zookeeper 连接失败：\n%w", k.errMainLoop)
		return
	}
	// Registered after Connect succeeds so we never Close a nil session;
	// this defer runs before the logging defer above (LIFO).
	defer func() {
		logrus.Debugf("停止观察brokers")
		if k.zkConn != nil {
			k.zkConn.Close()
		}
		if k.kafkaProducer != nil {
			_ = k.kafkaProducer.Close()
		}
	}()

	// One-shot watch on broker ids; onBrokerChanged re-arms it.
	_, _, k.brokerChangeCh, k.errMainLoop = k.zkConn.ChildrenW(zkBrokerIdPath)
	if k.errMainLoop != nil {
		k.errMainLoop = fmt.Errorf("zookeeper 获取brokers失败：\n%w", k.errMainLoop)
		return
	}

	k.findAndConnectKafka()
	if k.errMainLoop != nil {
		return
	}

	// Pump events until cancellation or the first sticky error.
	for k.ctx.Err() == nil {
		k.eventLoop()
		if k.errMainLoop != nil {
			return
		}
	}

}

// eventLoop performs one blocking select. With no message staged it waits for
// the next log entry (or a broker change / producer error / cancellation);
// with a message staged it instead tries to push that message into the
// producer, clearing the staging slot on success. Failures surface through
// k.errMainLoop.
func (k *moduleKafka) eventLoop() {
	if k.logMsgToSend == nil {
		// Idle: accept the next log entry alongside the control channels.
		select {
		case <-k.ctx.Done():
		case ev := <-k.brokerChangeCh:
			k.onBrokerChanged(ev)
		case prodErr := <-k.kafkaProducer.Errors():
			k.errMainLoop = prodErr.Err
		case entry := <-k.logCh:
			k.onLog(entry)
		}
		return
	}

	// A message is pending; it must go out before we accept another entry.
	if k.kafkaProducer == nil {
		k.errMainLoop = fmt.Errorf("日志发送失败：kafkaProducer == nil")
		return
	}

	select {
	case <-k.ctx.Done():
	case ev := <-k.brokerChangeCh:
		k.onBrokerChanged(ev)
	case prodErr := <-k.kafkaProducer.Errors():
		k.errMainLoop = prodErr.Err
	case k.kafkaProducer.Input() <- k.logMsgToSend:
		k.logMsgToSend = nil
	}
}

// getBrokerHosts opens a short-lived zookeeper session and resolves broker
// ids to "host:port" strings. When no ids are supplied, every child of
// zkBrokerIdPath is resolved.
func (k *moduleKafka) getBrokerHosts(ids ...string) (hosts []string, err error) {
	// Session timeout was originally time.Second*10 — kept at 2s.
	var conn *zk.Conn
	conn, _, err = zk.Connect(k.zkHosts, time.Second*2) //*10)
	if err != nil {
		return
	}
	defer conn.Close()

	// No explicit ids: enumerate all registered brokers.
	if len(ids) == 0 {
		if ids, _, err = conn.Children(zkBrokerIdPath); err != nil {
			return
		}
	}

	for _, id := range ids {
		var raw []byte
		if raw, _, err = conn.Get("/brokers/ids/" + id); err != nil {
			return
		}
		// Only Host/Port are needed from the broker registration JSON.
		broker := struct {
			Host string
			Port int
		}{}
		if err = json.Unmarshal(raw, &broker); err != nil {
			return
		}
		hosts = append(hosts, fmt.Sprintf("%s:%d", broker.Host, broker.Port))
	}
	return
}

// onBrokerChanged reacts to a zookeeper watch firing: on a child-membership
// change under the broker id node it re-arms the one-shot watch and rebuilds
// the producer against the new broker set. Other events are ignored.
func (k *moduleKafka) onBrokerChanged(event zk.Event) {
	if event.Type != zk.EventNodeChildrenChanged || event.Path != zkBrokerIdPath {
		return
	}
	logrus.Warnf("%s kafka brokers发生变动", pkgTag)
	// ChildrenW watches fire once; register a new one before reconnecting.
	_, _, k.brokerChangeCh, k.errMainLoop = k.zkConn.ChildrenW(zkBrokerIdPath)
	if k.errMainLoop != nil {
		return
	}
	k.findAndConnectKafka()
}

// findAndConnectKafka resolves the current broker addresses from zookeeper,
// tears down any existing producer, and establishes a fresh async producer.
// Failures are reported through k.errMainLoop; on error k.kafkaProducer is
// left nil.
func (k *moduleKafka) findAndConnectKafka() {
	var kafkaHosts []string
	kafkaHosts, k.errMainLoop = k.getBrokerHosts()
	if k.errMainLoop != nil {
		// BUG FIX: this error was previously ignored, so a zookeeper failure
		// led to dialing kafka with an empty host list — the resulting error
		// masked the real cause. Fail fast with the actual error instead.
		k.errMainLoop = fmt.Errorf("zookeeper 获取brokers失败：\n%w", k.errMainLoop)
		return
	}

	// Close the old producer before replacing it so its goroutines and
	// network connections are not leaked.
	if k.kafkaProducer != nil {
		_ = k.kafkaProducer.Close()
		k.kafkaProducer = nil
	}
	saramaConfig := sarama.NewConfig()
	saramaConfig.Net.DialTimeout = time.Second * 5
	saramaConfig.Net.ReadTimeout = time.Second * 5
	saramaConfig.Net.WriteTimeout = time.Second * 5
	// 随机的分区类型：返回一个分区器，该分区器每次选择一个随机分区
	// (random partitioner: each message goes to a randomly chosen partition)
	saramaConfig.Producer.Partitioner = sarama.NewRandomPartitioner
	logrus.Warnf("%s kafka 尝试连接brokers %s", pkgTag, kafkaHosts)
	k.kafkaProducer, k.errMainLoop = sarama.NewAsyncProducer(kafkaHosts, saramaConfig)
	if k.errMainLoop != nil {
		k.errMainLoop = fmt.Errorf("kafka 连接失败：\n%w", k.errMainLoop)
		return
	}
	logrus.Warnf("%s kafka 连接成功", pkgTag)
}

// onLog serializes a logrus entry with the package JSON formatter and stages
// it as the next kafka message; eventLoop performs the actual send. A format
// failure is surfaced through k.errMainLoop.
func (k *moduleKafka) onLog(l *logrus.Entry) {
	payload, formatErr := logrusFormatter.Format(l)
	if formatErr != nil {
		k.errMainLoop = fmt.Errorf("format entry error\n%w", formatErr)
		return
	}

	msg := &sarama.ProducerMessage{
		Topic: "logstash",
		Key:   sarama.StringEncoder("key"),
		Value: sarama.ByteEncoder(payload),
	}
	k.logMsgToSend = msg
}

// close cancels the module context, which stops mainLoop and lets
// watchBrokers' deferred cleanup release the zookeeper session and the
// kafka producer.
func (k *moduleKafka) close() {
	k.cancel()
}
