package main

import (
	"context"
	"os"
	"os/signal"
	"pd/server/com"
	"pd/server/plugin/logger"
	kf "pd/server/plugin/mq"
	"sync"
	"syscall"
	"time"

	kafka "github.com/IBM/sarama"
	"go.uber.org/atomic"
)

// getDelayParams extracts the delay-queue metadata carried in the message
// headers: the original destination topic and the instant at which the
// message becomes due for delivery. Headers that are absent leave the
// corresponding result at its zero value.
func getDelayParams(msg *kafka.ConsumerMessage) (srcTopic string, expTime time.Time) {
	for _, h := range msg.Headers {
		switch string(h.Key) {
		case kf.KafkaDelayMessageSourceTopic:
			srcTopic = string(h.Value)
		case kf.KafkaMessageProduceTimestamp:
			// Parse error is deliberately ignored; presumably ToInt64
			// yields 0 on failure, leaving expTime at the Unix epoch —
			// TODO confirm against com.ToInt64.
			sec, _ := com.ToInt64(string(h.Value))
			expTime = time.Unix(sec, 0)
		}
	}
	return srcTopic, expTime
}

// defaultTimeout is not referenced anywhere in this file.
// NOTE(review): confirm it is used elsewhere or remove it.
const defaultTimeout = 30 * time.Second

// handleMessage processes a single message taken from a delay queue.
//
// Outcomes:
//   - expired and forwarded OK: the offset is marked and committed,
//     returns (true, 0);
//   - expired but forwarding failed: the offset is left unmarked,
//     returns (false, retryDelay) so the caller can retry shortly;
//   - not yet due: the offset is left unmarked, returns (false, d) where d
//     is the time remaining until expiry — the uncommitted message will be
//     redelivered once the caller re-establishes the session.
func handleMessage(s kafka.ConsumerGroupSession, p kafka.SyncProducer, msg *kafka.ConsumerMessage) (bool, time.Duration) {
	// Pause before retrying a failed forward.
	const retryDelay = 5 * time.Second

	now := time.Now()
	srcTopic, expTime := getDelayParams(msg)
	logger.Infof("topic[%s(%d)] srcTopic[%s] expireTime[%s] msg[%s] isok[%v]\n", msg.Topic, msg.Offset, srcTopic, expTime.Format("2006-01-02 15:04:05"), string(msg.Value), expTime.Before(now))

	if !expTime.Before(now) {
		// Not yet due: wait out the remaining delay without committing.
		return false, expTime.Sub(now)
	}

	// Due: forward to the original topic, preserving all headers.
	m := &kafka.ProducerMessage{
		Topic:   srcTopic,
		Value:   kafka.StringEncoder(msg.Value),
		Headers: make([]kafka.RecordHeader, 0, len(msg.Headers)),
	}
	for _, h := range msg.Headers {
		m.Headers = append(m.Headers, *h)
	}

	if _, _, err := p.SendMessage(m); err != nil {
		// Forwarding failed: leave the offset unmarked and retry later.
		return false, retryDelay
	}

	s.MarkMessage(msg, "")
	s.Commit()
	return true, 0
}

// HandleFunc is the claim-processing callback that Handler.ConsumeClaim
// delegates to for each claimed partition.
type HandleFunc func(s kafka.ConsumerGroupSession, c kafka.ConsumerGroupClaim) error

// NewHandler builds a Handler whose ConsumeClaim delegates to f; p is kept
// alongside it as the producer used when forwarding messages.
func NewHandler(p kafka.SyncProducer, f HandleFunc) *Handler {
	h := &Handler{fn: f, producer: p}
	return h
}

// Handler implements sarama's ConsumerGroupHandler interface.
// Claim processing is delegated to fn; producer is the sync producer used
// to forward expired messages back to their source topics.
type Handler struct {
	fn       HandleFunc
	producer kafka.SyncProducer
}

// Setup is run at the beginning of a new session, before ConsumeClaim.
// Setup is run at the beginning of a new session, before ConsumeClaim.
// No per-session state is required; it only logs that a session started.
func (h *Handler) Setup(kafka.ConsumerGroupSession) error {
	logger.Info("=========>session setup")
	return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
// but before the offsets are committed for the very last time.
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines
// have exited but before the offsets are committed for the very last time.
// Nothing to release here; it only logs that the session ended.
func (h *Handler) Cleanup(kafka.ConsumerGroupSession) error {
	logger.Info("=========>session cleanup")
	return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Once the Messages() channel is closed, the Handler must finish its processing
// loop and exit.
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Once the Messages() channel is closed, the Handler must finish its processing
// loop and exit. The loop itself lives in the injected fn callback.
func (h *Handler) ConsumeClaim(s kafka.ConsumerGroupSession, c kafka.ConsumerGroupClaim) error {
	return h.fn(s, c)
}

// DelayTransfer consumes one delay-queue topic and forwards messages whose
// delay has elapsed back to their original topics. It runs until ctx is
// cancelled and signals completion through wg.
//
// Auto-commit is deliberately disabled: an offset is committed only after
// its message has been successfully forwarded, so unexpired or failed
// messages are redelivered when the session is re-established.
func DelayTransfer(ctx context.Context, wg *sync.WaitGroup, topic string) {
	defer wg.Done()

	config := kafka.NewConfig()
	config.Metadata.AllowAutoTopicCreation = true
	config.Consumer.Offsets.AutoCommit.Enable = false // commit manually after forwarding
	config.Consumer.Offsets.Initial = kafka.OffsetOldest
	config.Consumer.Group.Rebalance.Retry.Max = 0x7fffffff // effectively retry forever
	config.Consumer.Return.Errors = true

	conf := kafka.NewConfig()
	conf.Producer.Return.Successes = true // required by SyncProducer
	conf.Producer.Partitioner = kafka.NewRandomPartitioner
	p, err := kafka.NewSyncProducer([]string{kf.GetServer()}, conf)
	if err != nil {
		panic(err)
	}
	defer p.Close() // release producer connections on exit

	logger.Info("start======")
	c2, err := kafka.NewConsumerGroup([]string{kf.GetServer()}, "main", config)
	if err != nil {
		panic(err)
	}
	defer c2.Close() // leave the consumer group cleanly on exit

	run := atomic.NewBool(true)
	for run.Load() {
		// Pass ctx (not context.Background()) so that a Consume blocked in
		// join/rebalance also unblocks when shutdown is requested.
		err = c2.Consume(ctx, []string{topic}, NewHandler(p, func(s kafka.ConsumerGroupSession, c kafka.ConsumerGroupClaim) error {
			var due <-chan time.Time
			waiting := false
			for {
				if waiting {
					// A message was seen but is not yet due (or failed to
					// forward). When the timer fires, return so the session
					// is re-established and the uncommitted message is
					// redelivered.
					select {
					case <-due:
						return nil
					case <-ctx.Done():
						logger.Info("got exit signal")
						run.Store(false)
						return nil
					}
				}
				select {
				case msg := <-c.Messages():
					if msg == nil {
						// Channel closed: claim revoked / rebalance.
						return nil
					}
					ok, timeout := handleMessage(s, p, msg)
					if !ok {
						due = time.After(timeout)
						waiting = true
					}
				case <-ctx.Done():
					logger.Info("got exit signal")
					run.Store(false)
					return nil
				}
			}
		}))
		if err != nil {
			logger.Error(err)
		}
		// Once ctx is cancelled, Consume fails immediately; bail out here
		// instead of spinning in a hot error loop.
		if ctx.Err() != nil {
			return
		}
	}
}

// main launches one DelayTransfer worker per configured delay level, then
// blocks until a termination signal arrives, cancels the shared context,
// and waits for all workers to drain.
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	wg := sync.WaitGroup{}

	// The channel must be buffered: signal.Notify sends without blocking,
	// so a signal arriving before the <-exit receive below would be
	// silently dropped on an unbuffered channel (staticcheck SA1017).
	exit := make(chan os.Signal, 1)
	// os.Kill (SIGKILL) cannot be trapped, so registering it was a no-op;
	// only SIGINT and SIGTERM are actionable.
	signal.Notify(exit, syscall.SIGINT, syscall.SIGTERM)

	wg.Add(len(kf.LevelList))
	for _, level := range kf.LevelList {
		go DelayTransfer(ctx, &wg, kf.KafkaDelayQueuePrefix+level.Suffix)
	}

	<-exit
	cancel()
	wg.Wait()
}
