package dag_plugin_kafkaconsumer

import (
	"context"
	"dag2.0/gat1400-proxy/concurrent"
	"dag2.0/gat1400-proxy/kafka"
	"dag2.0/gat1400-proxy/logger"
	"dag2.0/gat1400-proxy/operator"
	"dag2.0/gat1400-proxy/util/times"
	"errors"
	"fmt"
	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
	"github.com/rcrowley/go-metrics"
	"strconv"
	"strings"
	"sync"
	"time"
)

// INTERVAL_PERIOD is the tick interval, in hours, used by startTicker.
const INTERVAL_PERIOD = 24

// FromOffset selects where a new consumer group starts reading a topic.
type FromOffset int

const (
	_FROM_OFFSET_NEWEST FromOffset = 0 // start from the latest offset
	_FROM_OFFSET_OLDEST FromOffset = 1 // start from the earliest offset
)

// KafkaConsumer is a DAG source plugin that consumes one or more Kafka
// topics with a consumer group and emits batches of messages downstream.
type KafkaConsumer struct {
	sync.Mutex

	Bootstrap []string // Kafka broker address list
	Topics    []string // topics to consume; one consume goroutine per topic
	GroupId   string   // consumer group id
	FromOffset
	BatchSize    int                      // max messages per emitted batch (clamped to >= 1)
	BatchDelay   int                      // batch flush timeout in milliseconds
	parallelSize int                      // bounds in-flight uncommitted batches (submiter capacity = 2x this)
	topicSleep   map[string]time.Duration // optional per-topic sleep after each dispatched batch
	startTime    string                   // optional consumption window start (see consumeAndSend)
	endTime      string                   // optional consumption window end
	kafkaVersion string                   // Kafka protocol version string, e.g. "2.3.0"

	//kafkaConsumer *cluster.Consumer
	executor  *concurrent.Executor    // worker pool that emits batches downstream
	emit      func(interface{}) error // downstream emit callback supplied by Init
	cancelCtx context.Context         // canceled via Close to stop all loops
	cancel    func()
}

// Init configures the consumer from the plugin config, validates the
// mandatory settings (bootstrap, topics, groupId) and launches the
// background consume loop. The signature is the plugin contract.
func (consumer *KafkaConsumer) Init(config operator.Config, emit func(interface{}) error) error {
	logger.LOG_INFO("启动kafka-consumer")
	// Tear down any previous run before re-initializing.
	_ = consumer.Close()
	consumer.emit = emit

	// Read every setting once up front.
	bootstrap := config.GetString("kafkaconsumer_bootstrap")
	topics := config.GetString("kafkaconsumer_topics")
	groupId := config.GetString("kafkaconsumer_groupId")
	fromEarliest := config.GetBool("kafkaconsumer_fromEarliestOffset")
	batchSize := config.GetInt("kafkaconsumer_batchSize")
	batchDelay := config.GetInt("kafkaconsumer_batchDelay")
	parallel := config.GetInt("kafkaconsumer_parallel")
	parallelSize := config.GetInt("kafkaconsumer_parallelSize")
	topicSleep := config.GetString("kafkaconsumer_topicSleep")
	kafkaVersion := config.GetString("kafkaconsumer_kafkaVersion")
	startTime := config.GetString("kafkaconsumer_startTime")
	endTime := config.GetString("kafkaconsumer_endTime")

	logger.LOG_WARN("---------------- kafkaconsumer config ----------------")
	logger.LOG_WARN("kafkaconsumer_bootstrap : " + bootstrap)
	logger.LOG_WARN("kafkaconsumer_topics : " + topics)
	logger.LOG_WARN("kafkaconsumer_groupId : " + groupId)
	logger.LOG_WARN("kafkaconsumer_fromEarliestOffset : " + strconv.FormatBool(fromEarliest))
	logger.LOG_WARN("kafkaconsumer_batchSize : " + strconv.Itoa(batchSize))
	logger.LOG_WARN("kafkaconsumer_batchDelay : " + strconv.Itoa(batchDelay))
	logger.LOG_WARN("kafkaconsumer_parallel : " + strconv.Itoa(parallel))
	logger.LOG_WARN("kafkaconsumer_parallelSize : " + strconv.Itoa(parallelSize))
	logger.LOG_WARN("kafkaconsumer_topicSleep : " + topicSleep)
	logger.LOG_WARN("kafkaconsumer_kafkaVersion : " + kafkaVersion)
	logger.LOG_WARN("kafkaconsumer_startTime : " + startTime)
	logger.LOG_WARN("kafkaconsumer_endTime : " + endTime)
	logger.LOG_WARN("------------------------------------------------------")

	if bootstrap == "" || topics == "" || groupId == "" {
		return errors.New("缺少配置：kafkaconsumer_bootstrap,kafkaconsumer_topics,kafkaconsumer_groupId")
	}

	consumer.Bootstrap = strings.Split(strings.Trim(bootstrap, " "), ",")
	consumer.Topics = strings.Split(strings.Trim(topics, " "), ",")
	consumer.GroupId = groupId
	consumer.FromOffset = _FROM_OFFSET_NEWEST
	if fromEarliest {
		consumer.FromOffset = _FROM_OFFSET_OLDEST
	}
	if batchSize < 1 {
		batchSize = 1
	}
	consumer.BatchSize = batchSize
	consumer.BatchDelay = batchDelay

	// A non-positive worker count falls back to one worker per batch message.
	if parallel <= 0 {
		parallel = consumer.BatchSize
	}
	if parallelSize <= 0 {
		parallelSize = 1
	}
	consumer.executor = concurrent.NewExecutor(parallel)
	consumer.parallelSize = parallelSize
	consumer.initTopicSleep(topicSleep)
	consumer.kafkaVersion = kafkaVersion
	consumer.startTime = startTime
	consumer.endTime = endTime

	go consumer.Start()
	return nil
}

// initTopicSleep parses the kafkaconsumer_topicSleep setting into per-topic
// sleep durations (milliseconds). Two formats are accepted:
//   - a bare positive integer N: every configured topic sleeps N ms;
//   - "topic:N" pairs separated by commas (or newlines when no comma
//     is present); malformed entries are logged and skipped.
func (c *KafkaConsumer) initTopicSleep(topicSleep string) {
	c.topicSleep = make(map[string]time.Duration)
	topicSleep = strings.TrimSpace(topicSleep)
	if topicSleep == "" {
		return
	}
	// Single number: apply the same sleep to all topics.
	if ts, err := strconv.Atoi(topicSleep); err == nil && ts > 0 {
		for _, t := range c.Topics {
			c.topicSleep[t] = time.Duration(ts) * time.Millisecond
		}
		return
	}
	// Per-topic "topic:millis" entries.
	sep := "\n"
	if strings.Contains(topicSleep, ",") {
		sep = ","
	}
	for _, rel := range strings.Split(topicSleep, sep) {
		rel = strings.TrimSpace(rel)
		parts := strings.Split(rel, ":")
		if len(parts) != 2 {
			logger.LOG_WARN("topic sleep 配置参数不合法:" + rel)
			continue
		}
		ms, err := strconv.Atoi(parts[1])
		if err != nil {
			logger.LOG_WARN("topic sleep 配置参数不合法:" + parts[1])
			continue
		}
		c.topicSleep[parts[0]] = time.Duration(ms) * time.Millisecond
	}
}

// MeterLog adapts the plugin's INFO logger to a Printf-style logging
// interface (as used by go-metrics log helpers).
type MeterLog struct {
}

// Printf formats the arguments and forwards the result to the INFO log.
func (m *MeterLog) Printf(format string, v ...interface{}) {
	msg := fmt.Sprintf(format, v...)
	logger.LOG_INFO(msg)
}

// Start builds the cluster consumer configuration and launches one consume
// goroutine per topic. Each goroutine (re)creates its consumer on failure
// until the plugin context is canceled. Start blocks until Close cancels
// the context.
//
// NOTE(review): c.cancel/c.cancelCtx are written here while Close may read
// them from another goroutine — confirm external synchronization.
func (c *KafkaConsumer) Start() {
	cancelCtx, cancel := context.WithCancel(context.Background())
	c.cancel = cancel
	c.cancelCtx = cancelCtx

	config := cluster.NewConfig()
	config.Consumer.Return.Errors = true
	config.Group.Return.Notifications = true
	// Default protocol version; overridden by kafkaconsumer_kafkaVersion when parseable.
	config.Version = sarama.V2_3_0_0
	if c.kafkaVersion != "" {
		v, err := sarama.ParseKafkaVersion(c.kafkaVersion)
		if err != nil {
			logger.LOG_ERROR("ParseKafkaVersion is error", err)
		} else {
			config.Version = v
		}
	}
	config.Net.DialTimeout = 5 * time.Second
	if c.FromOffset == _FROM_OFFSET_NEWEST {
		config.Consumer.Offsets.Initial = sarama.OffsetNewest
	} else {
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	}
	for _, topic := range c.Topics {
		go func(topic string) {
			for {
				// Stop reconnecting once the plugin has been closed.
				select {
				case <-cancelCtx.Done():
					return
				default:
				}
				consumer, err := cluster.NewConsumer(c.Bootstrap, c.GroupId, []string{topic}, config)
				if err != nil {
					logger.LOG_ERROR("创建 kafka consumer 失败，重新连接：", err)
					time.Sleep(1000 * time.Millisecond)
					continue
				}
				ctx, cancelHandlers := context.WithCancel(c.cancelCtx)
				go c.handleErrors(consumer, ctx)
				go c.handleNotifications(consumer, ctx)
				// Blocks until the consumer fails or the plugin is closed;
				// the next loop iteration rebuilds the consumer (or exits).
				c.consumeAndSend(consumer, c.topicSleep[topic])
				cancelHandlers()
			}
		}(topic)
	}
	// Block until Close cancels the context.
	<-cancelCtx.Done()
}

// startTicker returns a Ticker whose first fire is at the next occurrence
// of the given hour (CST); the ticker then repeats with that same initial
// delay as its period.
// NOTE(review): the current minute/second/nanosecond are preserved, so the
// tick lands at hour:MM:SS.nnn of "now" — confirm this is intended.
func startTicker(hour int) *time.Ticker {
	now := time.Now()
	nextTick := time.Date(now.Year(), now.Month(), now.Day(), hour, now.Minute(), now.Second(), now.Nanosecond(), times.CstZone)
	if !nextTick.After(time.Now()) {
		// BUG FIX: INTERVAL_PERIOD is the untyped constant 24, so the old
		// nextTick.Add(INTERVAL_PERIOD) advanced by 24 *nanoseconds*, leaving
		// nextTick in the past and handing time.NewTicker a negative duration
		// (which panics). Advance by a full day instead.
		nextTick = nextTick.Add(INTERVAL_PERIOD * time.Hour)
	}
	fmt.Println(nextTick, "- next tick")
	diff := nextTick.Sub(time.Now())
	return time.NewTicker(diff)
}

// consumeAndSend is the per-connection consume loop. It drains messages from
// consumer into batches of at most BatchSize, flushing a partial batch after
// batchDelay of idle time, and hands each batch to the executor, which emits
// it downstream and then acknowledges its offsets through a
// BatchOffsetSubmiter. Returns when the message channel closes (the caller
// rebuilds the consumer) or the plugin context is canceled; the consumer is
// closed on exit.
func (c *KafkaConsumer) consumeAndSend(consumer *cluster.Consumer, sleep time.Duration) {
	ctx, cancel := context.WithCancel(c.cancelCtx)
	defer func() {
		cancel()
		_ = consumer.Close()
	}()

	batch := make([]*sarama.ConsumerMessage, 0)
	batchSize := c.BatchSize
	// Default flush timeout is 1ms unless kafkaconsumer_batchDelay is set.
	var batchDelay time.Duration = 1 * time.Millisecond
	if c.BatchDelay > 0 {
		batchDelay = time.Duration(c.BatchDelay) * time.Millisecond
	}
	idleDelay := time.NewTimer(batchDelay)
	// Offsets are committed in batch-arrival order; capacity bounds how many
	// in-flight batches may be awaiting emit/commit (backpressure).
	offsetSubmiter := &BatchOffsetSubmiter{
		ctx:      ctx,
		consumer: consumer,
		capacity: c.parallelSize * 2,
	}

OUT_LOOP:
	for {
		batch = make([]*sarama.ConsumerMessage, 0)
		// Reset the flush timer, draining it first if it already fired.
		if !idleDelay.Stop() {
			select {
			case <-idleDelay.C: //try to drain from the channel
			default:
			}
		}
		idleDelay.Reset(batchDelay)
		// Consume loop: fill the current batch.
	CONSUME_LOOP:
		for {
			// Optional consumption window. NOTE(review): this compares the
			// current MINUTE against kafkaconsumer_startTime/endTime and
			// silently ignores Atoi errors; if these settings are meant to
			// be hours, this is likely a bug — confirm with the config owner.
			if c.startTime != "" && c.endTime != "" {
				t := time.Now()
				temp := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), times.CstZone)
				startTime, _ := strconv.Atoi(c.startTime)
				endTime, _ := strconv.Atoi(c.endTime)
				if startTime > endTime {
					if temp.Minute() <= startTime && temp.Minute() >= endTime {
						time.Sleep(time.Second * 10)
						continue
					}
				} else {
					if temp.Minute() <= startTime || temp.Minute() >= endTime {
						time.Sleep(time.Second * 10)
						continue
					}
				}
			}
			select {
			// Normal consumption.
			case msg, ok := <-consumer.Messages():
				if ok {
					batch = append(batch, msg)
					// Batch is full: dispatch it.
					if len(batch) >= batchSize {
						break CONSUME_LOOP
					}
				} else {
					// Channel closed: bail out so the caller rebuilds the consumer.
					logger.LOG_WARN("kafka消费失败", nil)
					break OUT_LOOP
				}
			// Batch timeout: flush whatever has been collected.
			case <-idleDelay.C:
				break CONSUME_LOOP
			// Shutdown requested.
			case <-c.cancelCtx.Done():
				break OUT_LOOP
			}
		}
		// Dispatch the collected batch.
		if len(batch) == 0 {
			continue
		}
		func(msgs []*sarama.ConsumerMessage) {
			bs := &BatchState{
				msgs:   msgs,
				finish: false,
			}
			// Register before submitting so offsets commit in arrival order;
			// this blocks while too many batches are still in flight.
			offsetSubmiter.addBatch(bs)
			var castMsgs []*kafka.KafkaMessage
			for _, msg := range msgs {
				castMsgs = append(castMsgs, castKafkaMessage(msg))
			}
			logger.LOG_DEBUG("kafkaconsumer receive batch msg,", len(castMsgs))

			_ = c.executor.Submit(func() {
				_ = c.emit(castMsgs)
				logger.LOG_DEBUG("发送成功,", len(castMsgs))
				// Mark this batch done and commit any finished prefix.
				bs.finish = true
				offsetSubmiter.submit()
			})
		}(batch)
		// Optional per-topic throttle.
		if sleep > 0 {
			time.Sleep(sleep)
		}
	}
}
// handleErrors logs consumer errors until the error channel closes or the
// supplied context is canceled.
func (c *KafkaConsumer) handleErrors(consumer *cluster.Consumer, ctx context.Context) {
	for {
		select {
		case err, ok := <-consumer.Errors():
			if !ok {
				return
			}
			logger.LOG_WARN("kafka消费异常", err)
		case <-ctx.Done():
			return
		}
	}
}

// handleNotifications logs rebalance notifications until the notification
// channel closes or the supplied context is canceled.
func (c *KafkaConsumer) handleNotifications(consumer *cluster.Consumer, ctx context.Context) {
	for {
		select {
		case ntf, ok := <-consumer.Notifications():
			if !ok {
				return
			}
			logger.LOG_WARN(ntf, nil)
		case <-ctx.Done():
			return
		}
	}
}

// castKafkaMessage converts a sarama consumer message into the plugin's
// kafka.KafkaMessage representation, copying any record headers.
func castKafkaMessage(msg *sarama.ConsumerMessage) *kafka.KafkaMessage {
	// Copy headers first; ranging over an empty slice leaves hs nil,
	// matching the original behavior.
	var hs []*kafka.MessageHeader
	for _, h := range msg.Headers {
		hs = append(hs, &kafka.MessageHeader{
			Key:   h.Key,
			Value: h.Value,
		})
	}
	return &kafka.KafkaMessage{
		Key:            msg.Key,
		Value:          msg.Value,
		Topic:          msg.Topic,
		Partition:      msg.Partition,
		Offset:         msg.Offset,
		Headers:        hs,
		Timestamp:      msg.Timestamp,
		BlockTimestamp: msg.BlockTimestamp,
	}
}

// Close shuts down the worker pool and cancels all consume loops, then
// unregisters the metrics meter. Always returns nil; the error return
// exists to satisfy the plugin interface.
func (c *KafkaConsumer) Close() error {
	if exec := c.executor; exec != nil {
		exec.Close()
	}
	if stop := c.cancel; stop != nil {
		stop()
	}
	metrics.Unregister("kafkaconsumer handle data")
	return nil
}

// BatchState tracks one dispatched batch: its messages and whether the
// downstream emit has completed (its offsets may then be committed).
type BatchState struct {
	msgs   []*sarama.ConsumerMessage
	finish bool // set true once the batch has been emitted downstream
}

// BatchOffsetSubmiter commits Kafka offsets strictly in batch-arrival order:
// a batch's offsets are only marked once every earlier batch has finished.
// (Name kept as-is for compatibility; "Submitter" is the usual spelling.)
type BatchOffsetSubmiter struct {
	sync.Mutex

	capacity int // max pending batches before addBatch blocks

	ctx      context.Context // canceled when the owning consume loop exits
	consumer *cluster.Consumer
	batchs   []*BatchState // pending batches, in arrival order
	finish   bool          // NOTE(review): appears unused in this file — confirm before relying on it
}

// addBatch appends bs to the pending-commit queue, blocking (polling every
// 100ms) while the queue is at capacity. This backpressures the consume
// loop when too many batches are still awaiting emit/commit.
func (bos *BatchOffsetSubmiter) addBatch(bs *BatchState) {
	for {
		bos.Lock()
		hasRoom := len(bos.batchs) < bos.capacity
		if hasRoom {
			bos.batchs = append(bos.batchs, bs)
		}
		bos.Unlock()
		if hasRoom {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
}

// submit marks offsets for the longest finished prefix of pending batches
// (preserving in-order commits), removes them from the queue, and then
// retries CommitOffsets until it succeeds or the context is canceled.
// Safe to call from multiple executor workers; the whole pass runs under
// the submiter's mutex.
func (bos *BatchOffsetSubmiter) submit() {
	bos.Lock()
	defer bos.Unlock() // simplified from the original defer-wrapped closure

	// Walk the queue front-to-back, stopping at the first unfinished batch.
	var commitBatchSize int
	for index, batch := range bos.batchs {
		if !batch.finish {
			break
		}
		commitBatchSize++
		logger.LOG_INFO("submit batch offset:", len(batch.msgs))
		for _, msg := range batch.msgs {
			bos.consumer.MarkOffset(msg, "")
		}
		bos.batchs[index] = nil // drop the reference before slicing it off
	}
	if commitBatchSize == 0 {
		return
	}
	logger.LOG_INFO("submit batch queue:", commitBatchSize, "/", len(bos.batchs))
	bos.batchs = bos.batchs[commitBatchSize:]
	// Retry the commit until it succeeds or shutdown is requested.
	for {
		select {
		case <-bos.ctx.Done():
			return
		default:
		}
		err := bos.consumer.CommitOffsets()
		if err == nil {
			return
		}
		logger.LOG_WARN("提交kafka offset异常，重试", err)
		time.Sleep(1 * time.Second)
	}
}
