package listen

import (
	"encoding/json"
	"fmt"
	kafkamodel "gitee.com/zaper/kafka-service/model"
	"gitee.com/zaper/kafka-service/service/consumer/consumer_model"
	metamgr "gitee.com/zaper/kafka-service/service/metadata"
	logger "gitee.com/zaper/private-logger"
	"gitee.com/zaper/service-config-client/client/listen/flush"
	"gitee.com/zaper/service-config-client/client/listen/submit"
	"gitee.com/zaper/service-config-client/constant"
	"gitee.com/zaper/service-config-client/os_env"
	"gitee.com/zaper/service-config-client/util"
	"os"
	"runtime/debug"
	"strings"
	"time"
)

// LisManager owns the Kafka-consume side of config listening: it receives
// raw consumer messages, filters the ones addressed to this service
// instance, and fans them out to per-category jump-consumer queues.
type LisManager struct {
	// tpName is not written or read anywhere in this file — presumably the
	// topic name; TODO(review): confirm it is used elsewhere or remove.
	tpName string
	// consumerChan delivers raw Kafka messages via its MessChan field.
	consumerChan *consumer_model.ConsumerChan
	// messQueueMap routes messages by Kafka message key (config category or
	// per-instance register key) to a bounded JumpConsumer queue.
	messQueueMap map[string]*util.JumpConsumer
	// ListenErrChan receives a signal when the consume loop exits
	// (panic or channel close); see HandlerKafka.
	ListenErrChan chan struct{}
}

// NewLisManager builds a LisManager around the given consumer channel,
// pre-creating one bounded JumpConsumer queue per message category plus a
// per-instance registration-notify queue.
func NewLisManager(ch *consumer_model.ConsumerChan) *LisManager {
	serviceID := os.Getenv(os_env.ServiceIdKey)
	registerNotify := fmt.Sprintf("%s_%s", constant.AllConfig, serviceID)

	queues := make(map[string]*util.JumpConsumer, 5)
	queues[constant.GaussDB] = util.NewJumpConsumer(720)
	queues[constant.RedisDB] = util.NewJumpConsumer(720)
	queues[constant.Other] = util.NewJumpConsumer(720)
	queues[constant.AllConfig] = util.NewJumpConsumer(100)
	// In large-scale deployments this queue only serves the current
	// instance itself, never other services, which avoids piling up
	// suspended goroutines. The key is this service instance's id.
	queues[registerNotify] = util.NewJumpConsumer(100)

	return &LisManager{
		consumerChan:  ch,
		messQueueMap:  queues,
		ListenErrChan: make(chan struct{}),
	}
}

// HandlerKafka is the main consume loop: it drains messages from
// consumerChan.MessChan, discards messages not addressed to this service
// instance, and pushes the rest onto the queue matching the message key.
// When the loop exits (channel closed or panic), it signals ListenErrChan
// exactly once so the owner can react to the crash.
func (lm *LisManager) HandlerKafka() {
	defer func() {
		e := recover()
		if e != nil {
			// Log the recovered value as well as the stack; the previous
			// format string dropped the panic value entirely.
			logger.Errorf("listen kafka err, err %v, stack %s", e, debug.Stack())
		}
		// Crash/exit signal for the Kafka consume loop.
		lm.ListenErrChan <- struct{}{}
	}()

	serviceID := os.Getenv(os_env.ServiceIdKey)
	for mess := range lm.consumerChan.MessChan {
		var m = &kafkamodel.MessModel{}
		kafkamodel.ParseConsumerMess(m, &mess)
		var configModel ConfigModel
		err := json.Unmarshal(m.Value, &configModel)
		if err != nil {
			logger.Infof("message Unmarshal fail %s", err)
			continue
		}
		// Drop messages whose service list does not include this instance.
		if !strings.Contains(configModel.ServiceList, serviceID) {
			logger.Infof("Current mess is not me, continue")
			continue
		}
		// Guard the map lookup: an unknown key previously produced a nil
		// *JumpConsumer, and the resulting panic killed the whole loop.
		queue, ok := lm.messQueueMap[string(m.Key)]
		if !ok {
			logger.Warnf("no queue registered for message key %s, drop", string(m.Key))
			continue
		}
		queue.Push(m)
	}
}

// HandlerMessSchedule spawns one worker goroutine per registered message
// queue; each worker loops forever in handlerMessage.
func (lm *LisManager) HandlerMessSchedule() {
	for _, queue := range lm.messQueueMap {
		go handlerMessage(queue)
	}
}

// handlerMessage is a per-queue worker loop: it polls one message at a
// time, decodes it into a ConfigModel, runs the registered flush tool for
// each config key/value, and submits the config status only when every
// flush succeeded. Each iteration runs inside its own function so a defer'd
// recover confines a panic to that single message.
func handlerMessage(queue *util.JumpConsumer) {
	for {
		func() {
			defer func() {
				if e := recover(); e != nil {
					logger.Warnf("handlerMessage exception err is %v,stack is %s", e, debug.Stack())
				}
			}()
			mess := queue.Poll()
			m, ok := mess.(*kafkamodel.MessModel)
			if !ok {
				// %v: mess is an arbitrary interface value, %s would
				// misformat non-string payloads.
				logger.Warnf("kafka mess err, %v", mess)
				return
			}
			var configModel *ConfigModel
			err := json.Unmarshal(m.Value, &configModel)
			if err != nil {
				logger.Warnf("kafka mess exception, err %v", err)
				return
			}
			// A JSON `null` payload unmarshals successfully but leaves the
			// pointer nil; previously this caused a nil-pointer panic below.
			if configModel == nil {
				logger.Warnf("kafka mess exception, empty config payload")
				return
			}
			flushRt := true
			for _, kv := range configModel.ConfigKeyValueList {
				messFlushTool := flush.GetMessFlush(kv.Key)
				if messFlushTool == nil {
					// No flush tool registered for this key: skip it.
					continue
				}
				flushRt = messFlushTool.Flush(kv.Value)
				if !flushRt {
					// One failed flush aborts the batch; status not submitted.
					break
				}
			}
			if flushRt {
				submit.SubmitStatus(configModel.UUID, string(m.Key))
			}
		}()
	}
}

// LoopCheckKafkaTopicOnline blocks until the named topic appears in the
// metadata manager, re-checking every delay seconds. It returns true once
// the topic is found (it never returns false; the loop runs until success).
func LoopCheckKafkaTopicOnline(topicName string, delay int64) bool {
	interval := time.Duration(delay) * time.Second
	for {
		logger.Infof("Check kafka topic online. topName is %s", topicName)
		if metamgr.GetMetadataManager().GetTopicByName(topicName) != nil {
			return true
		}
		time.Sleep(interval)
	}
}
