package mq

import (
	"encoding/json"
	"fmt"
	"pd/server/com"
	"pd/server/plugin/logger"
	"strings"
	"sync"
	"time"

	"context"

	kafka "github.com/IBM/sarama"
	"github.com/google/uuid"
)

// DefaultKafkaReceiveTimeout is the default timeout applied when reading messages.
const DefaultKafkaReceiveTimeout = 3 * time.Second

// RD_KAFKA_RESP_ERR__TIMED_OUT mirrors librdkafka's timed-out error code (-185);
// NOTE(review): unused in this chunk — presumably kept from a previous
// rdkafka-based implementation, confirm before removing.
const RD_KAFKA_RESP_ERR__TIMED_OUT = -185

// defaultKafkaConsumerGroupID is the consumer group ID used by PollMessage.
const defaultKafkaConsumerGroupID = "main"

// NewKafkaClient builds a Kafka-backed MessageQueue bound to the given topic.
// An optional timeout argument overrides DefaultKafkaReceiveTimeout for reads.
func NewKafkaClient(topic string, timeout ...time.Duration) MessageQueue {
	readTimeout := DefaultKafkaReceiveTimeout
	if len(timeout) > 0 {
		readTimeout = timeout[0]
	}
	return &KafkaClient{
		topic:              topic,
		readMessageTimeout: readTimeout,
		delayProducer:      make(map[string]kafka.SyncProducer),
		retryTimes:         3, // retry automatically up to 3 times (not counting the first attempt)
		retryWaitTime:      5 * time.Second,
	}
}

// KafkaClient implements MessageQueue on top of sarama producers and a
// consumer group, all bound to a single topic.
type KafkaClient struct {
	readMessageTimeout time.Duration // timeout used when reading messages
	topic              string
	groupLock          sync.Mutex // guards lazy creation of the consumer group c
	c                  kafka.ConsumerGroup
	p                  kafka.SyncProducer // lazily-created producer for the main topic
	// delayProducer caches one producer per delay-bucket topic.
	// NOTE(review): read/written without a lock — confirm pushes are single-goroutine.
	delayProducer map[string]kafka.SyncProducer
	retryTimes    uint          // automatic retry count, excluding the first attempt; 0 disables retries
	retryWaitTime time.Duration // wait time between retries
}

// GetServer returns the Kafka bootstrap address for the current environment:
// the test broker when running locally, the production broker otherwise.
func GetServer() string {
	addr := "kafka.iot.bidevice.com:9092"
	if com.IsLocal() {
		addr = "kafka.t.jiutiandata.com:9092"
	}
	return addr
}

const (
	// 延迟队列前缀
	KafkaDelayQueuePrefix = "KafkaDelay_"

	// 延迟消息的原队列key
	KafkaDelayMessageSourceTopic = "KafkaSourceTopic"
	KafkaMessageProduceTimestamp = "ProduceTimestamp" // 消息产生时间
)

type Level struct {
	Delay  time.Duration
	Suffix string
}

// 延迟队列级别列表
var LevelList = []*Level{
	{5 * time.Second, "5s"},   // 5秒
	{10 * time.Second, "10s"}, // 10秒
	{30 * time.Second, "30s"},
	{time.Minute, "1m"},     // 1分钟
	{5 * time.Minute, "5m"}, // 5分钟
	{30 * time.Minute, "30m"},
	{time.Hour, "1h"},
	{3 * time.Hour, "3h"},
}

// 根据延迟时间，划分到不同的延迟队列
// 延迟队列：1m, 5m, 30m, 1h, 3h
func getDelayTopic(delay time.Duration) (time.Duration, string) {
	for _, level := range LevelList {
		if delay <= level.Delay {
			return level.Delay, KafkaDelayQueuePrefix + level.Suffix
		}
	}
	last := LevelList[len(LevelList)-1]
	return last.Delay, KafkaDelayQueuePrefix + last.Suffix
}

// Partitioner is a sarama partitioner that always selects partition 0.
// NOTE(review): not referenced in this chunk — presumably plugged into a
// producer config elsewhere in the package; confirm.
type Partitioner struct {
}

// Partition always returns partition 0, regardless of message or partition count.
func (*Partitioner) Partition(message *kafka.ProducerMessage, numPartitions int32) (int32, error) {
	return 0, nil
}

// RequiresConsistency reports that the message→partition mapping must stay stable.
func (*Partitioner) RequiresConsistency() bool {
	return true
}

// newProducer creates a sarama SyncProducer for the configured broker list,
// retrying up to k.retryTimes additional times with k.retryWaitTime between
// attempts. It returns the producer, or the last connection error.
func (k *KafkaClient) newProducer() (kafka.SyncProducer, error) {
	cfg := kafka.NewConfig()
	cfg.Producer.Return.Successes = true
	brokers := strings.Split(GetServer(), ",")

	var lastErr error
	for attempt := uint(0); attempt <= k.retryTimes; attempt++ {
		if attempt > 0 {
			time.Sleep(k.retryWaitTime)
		}
		p, err := kafka.NewSyncProducer(brokers, cfg)
		if err == nil {
			return p, nil
		}
		lastErr = err
	}
	return nil, lastErr
}

// 推送消息
func (k *KafkaClient) PushJson(msg interface{}, header MessageHeader, delay ...time.Duration) error {
	var err error
	for try := 0; try < 10; try++ {
		if err = k.doPushJson(msg, header, delay...); err != nil {
			logger.Error(err)
			time.Sleep(time.Duration(try*5) * time.Second)
			continue
		}
		break
	}
	return err
}

// doPushJson marshals msg to JSON and sends it once (with producer-level
// retries). With a positive delay the message goes to a delay-bucket topic
// instead, carrying headers recording the original topic and the unix
// timestamp (now + bucket delay) at which it becomes due.
//
// NOTE(review): k.delayProducer and k.p are read and written without a lock —
// confirm pushes never happen from multiple goroutines concurrently.
func (k *KafkaClient) doPushJson(msg interface{}, header MessageHeader, delay ...time.Duration) error {
	msgBytes, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	// Unique ID header so consumers can trace/deduplicate individual messages.
	msgId := uuid.New().String()
	// var delayStr string
	// if len(delay) > 0 {
	// 	delayStr = delay[0].String()
	// }
	// logger.Infof("Kafka Produce msgID[%s] delay[%s] msg[%s]", msgId, delayStr, string(msgBytes))

	m := &kafka.ProducerMessage{}
	m.Headers = append(m.Headers, kafka.RecordHeader{Key: []byte("ID"), Value: []byte(msgId)})
	if header != nil {
		for k, v := range header {
			m.Headers = append(m.Headers, kafka.RecordHeader{Key: []byte(k), Value: []byte(v)})
		}
	}

	var producer kafka.SyncProducer = nil

	if len(delay) > 0 && delay[0] > 0 { // route into a delay queue
		waitTime, delayQueue := getDelayTopic(delay[0])
		// Lazily create and cache one producer per delay topic.
		if producer = k.delayProducer[delayQueue]; producer == nil {
			if producer, err = k.newProducer(); err != nil {
				return err
			}
			k.delayProducer[delayQueue] = producer
		}
		m.Topic = delayQueue
		m.Value = kafka.StringEncoder(string(msgBytes))
		m.Headers = append(m.Headers, kafka.RecordHeader{Key: []byte(KafkaDelayMessageSourceTopic), Value: []byte(k.topic)})                                            // original topic
		m.Headers = append(m.Headers, kafka.RecordHeader{Key: []byte(KafkaMessageProduceTimestamp), Value: []byte(fmt.Sprintf("%d", time.Now().Add(waitTime).Unix()))}) // due timestamp (now + bucket delay)
	} else {
		// Direct publish: lazily create and cache the main-topic producer.
		if producer = k.p; producer == nil {
			if producer, err = k.newProducer(); err != nil {
				return err
			}
			k.p = producer
		}
		m.Topic = k.topic
		m.Value = kafka.StringEncoder(string(msgBytes))
	}

	// Send with retries; the first attempt is immediate, later ones wait.
	for i := uint(0); i <= k.retryTimes; i++ {
		if i > 0 {
			time.Sleep(k.retryWaitTime)
		}
		if _, _, err = producer.SendMessage(m); err == nil {
			return nil
		}
		logger.Error(err)
	}

	return err
}

// NewKafkaConsumerHandler wraps a ConsumeClaim callback in a type that
// satisfies sarama's ConsumerGroupHandler interface.
func NewKafkaConsumerHandler(fn ConsumeClaim) *KafkaConsumerHandler {
	return &KafkaConsumerHandler{
		consumeClaim: fn,
	}
}

// ConsumeClaim is the callback invoked for each claimed partition of a
// consumer group session.
type ConsumeClaim func(s kafka.ConsumerGroupSession, c kafka.ConsumerGroupClaim) error

// KafkaConsumerHandler adapts a ConsumeClaim function to sarama's
// ConsumerGroupHandler interface.
type KafkaConsumerHandler struct {
	consumeClaim ConsumeClaim
}

// Setup is run at the beginning of a new session, before ConsumeClaim.
// No per-session initialization is needed here.
func (h *KafkaConsumerHandler) Setup(kafka.ConsumerGroupSession) error {
	// 	logger.Info("session setup")
	return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
// but before the offsets are committed for the very last time.
// No per-session teardown is needed here.
func (h *KafkaConsumerHandler) Cleanup(s kafka.ConsumerGroupSession) error {
	//	logger.Info("session cleanup")
	return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Once the Messages() channel is closed, the Handler must finish its processing
// loop and exit. It simply delegates to the wrapped callback.
func (h *KafkaConsumerHandler) ConsumeClaim(s kafka.ConsumerGroupSession, c kafka.ConsumerGroupClaim) error {
	return h.consumeClaim(s, c)
}

// NOTE(review): dead configuration snippet, apparently from a previous
// rdkafka-based implementation — kept for reference:
//	&kafka.ConfigMap{
//		"bootstrap.servers": GetServer(),
//		"group.id":          "main",
//		// "auto.offset.reset": "earliest",
//		"enable.auto.commit": "false",
//	}

// saramaMsgNode pairs a consumed message with the session it arrived on.
// NOTE(review): not referenced anywhere in this chunk — possibly used by
// other files in package mq, or dead code; confirm before removing.
type saramaMsgNode struct {
	s   kafka.ConsumerGroupSession
	msg *kafka.ConsumerMessage
}

// PollMessage consumes messages from the client's topic in an endless loop,
// invoking fn for every message received. By default each message is marked
// and committed automatically after fn returns; pass notAutoConfirm=true to
// commit only when fn itself invokes Message.Commit.
//
// The consumer group is created lazily (retrying forever on failure) and
// cached on the client; creation is guarded by groupLock. This function
// never returns under normal operation.
func (k *KafkaClient) PollMessage(fn MessageHandler, notAutoConfirm ...bool) error {
	k.groupLock.Lock()
	if k.c == nil {
		config := kafka.NewConfig()
		config.Metadata.AllowAutoTopicCreation = true
		config.Consumer.Offsets.AutoCommit.Enable = false
		config.Consumer.Offsets.Initial = kafka.OffsetOldest
		// config.Consumer.Offsets.AutoCommit.Interval = time.Second
		config.Consumer.Offsets.Retry.Max = 3
		// config.Consumer.MaxWaitTime = 5 * time.Second
		// config.Consumer.Group.Rebalance.Retry.Backoff = 10 * time.Second
		config.Consumer.Group.Rebalance.Retry.Max = 0x7fffffff // effectively retry rebalancing forever
		config.Consumer.Return.Errors = true

		// Keep retrying until the consumer group can be created.
		for {
			c, err := kafka.NewConsumerGroup(strings.Split(GetServer(), ","), defaultKafkaConsumerGroupID, config)
			if err != nil {
				logger.Error(err)
				time.Sleep(5 * time.Second)
				continue
			}
			k.c = c
			break
		}
	}
	k.groupLock.Unlock()

	isAutoConfirm := len(notAutoConfirm) == 0 || !notAutoConfirm[0]

	handler := NewKafkaConsumerHandler(func(s kafka.ConsumerGroupSession, c kafka.ConsumerGroupClaim) error {
		// The loop ends (and the claim returns) when Messages() is closed.
		for msg := range c.Messages() {
			m := &Message{
				Offset: msg.Offset,
				Body:   msg.Value,
				Header: make(map[string]string, len(msg.Headers)),
			}
			for _, h := range msg.Headers {
				m.Header[string(h.Key)] = string(h.Value)
			}

			if isAutoConfirm {
				// Auto mode commits below after fn returns; Commit is a no-op.
				m.Commit = func() {}
			} else {
				m.Commit = func() {
					s.MarkMessage(msg, "")
					s.Commit()
				}
			}

			fn(m)

			if isAutoConfirm {
				s.MarkMessage(msg, "")
				s.Commit()
			}
		}
		return nil
	})

	for {
		// Consume blocks for a whole session and returns on rebalance or
		// error. Fix: the returned error was previously dropped silently.
		if err := k.c.Consume(context.Background(), []string{k.topic}, handler); err != nil {
			logger.Error(err)
		}
	}
}

// SetReceiveTimeOut overrides the timeout used when reading messages.
func (k *KafkaClient) SetReceiveTimeOut(timeout time.Duration) {
	k.readMessageTimeout = timeout
}

// Close shuts down the main producer, every cached delay-queue producer, and
// the consumer group, if they were created. Individual close errors are
// logged rather than returned, preserving the original always-nil contract.
//
// Fix: the original closed k.c (the consumer) when k.p (the producer) was
// non-nil, leaking the producer; delayProducer entries were never closed.
func (k *KafkaClient) Close() error {
	if k.p != nil {
		if err := k.p.Close(); err != nil {
			logger.Error(err)
		}
	}
	for _, p := range k.delayProducer {
		if p == nil {
			continue
		}
		if err := p.Close(); err != nil {
			logger.Error(err)
		}
	}
	if k.c != nil {
		if err := k.c.Close(); err != nil {
			logger.Error(err)
		}
	}
	return nil
}
