// Package kq wraps a Kafka producer and group-based consumers behind the TargetKafka interface.
package kq

import (
	"encoding/json"
	"fmt"
	"os/user"
	"strings"

	"github.com/zeromicro/go-zero/core/queue"
	"github.com/zeromicro/go-zero/core/stringx"

	"gitlab.vspncd.com/mic/common/tools"
)

type (
	// CfgOption is a functional option that mutates cfgOptions in NewTargetKafka.
	CfgOption func(options *cfgOptions)

	// TargetKafka is a combined Kafka producer/consumer client bound to one topic.
	TargetKafka interface {
		// Start launches all registered consumer queues (each in its own goroutine).
		Start()
		// Stop shuts down all registered consumer queues.
		Stop()
		// RegConsumer registers a consume handler under the given consumer group.
		RegConsumer(group string, consume ConsumeHandle)
		// Send JSON-encodes v and pushes it to the configured topic.
		Send(v interface{}) error
		// SendWithKey JSON-encodes v and pushes it with the given partition key.
		SendWithKey(key string, v interface{}) error
	}

	cfgOptions struct {
		topic    string   // effective topic name (environment prefix + base topic)
		hosts    []string // Kafka broker addresses
		mode     string   // deployment mode: dev, test, pre, pro (defaults to "pro")
		logMode  string   // log output, one of: console, file, volume (defaults to "console")
		logLevel string   // log level, one of: info, error, severe (defaults to "info")
	}

	defaultTargetKafka struct {
		cfg       *cfgOptions
		producer  *Pusher                                  // single shared producer for cfg.topic
		consumers map[string]map[string]queue.MessageQueue // topic -> group -> consumer queue
	}
)

// WithMode sets the deployment mode (dev/test/pre/pro), which controls the topic prefix.
func WithMode(mode string) CfgOption {
	return func(o *cfgOptions) {
		o.mode = mode
	}
}

// WithLogMode sets the consumer log output: console, file, or volume.
func WithLogMode(logMode string) CfgOption {
	return func(o *cfgOptions) {
		o.logMode = logMode
	}
}

// WithLogLevel sets the consumer log level: info, error, or severe.
func WithLogLevel(logLevel string) CfgOption {
	return func(o *cfgOptions) {
		o.logLevel = logLevel
	}
}

// NewTargetKafka builds a TargetKafka for the given brokers and base topic.
// The effective topic name is the mode-dependent prefix plus topic, and a
// producer for it is created eagerly; consumers are added via RegConsumer.
func NewTargetKafka(hosts []string, topic string, opts ...CfgOption) TargetKafka {
	cfg := &cfgOptions{}
	for _, apply := range opts {
		apply(cfg)
	}
	// Fill any options the caller left unset with their defaults.
	if cfg.mode == "" {
		cfg.mode = "pro"
	}
	if cfg.logMode == "" {
		cfg.logMode = "console"
	}
	if cfg.logLevel == "" {
		cfg.logLevel = "info"
	}

	target := &defaultTargetKafka{
		cfg:       cfg,
		consumers: make(map[string]map[string]queue.MessageQueue),
	}
	// topicPrefix reads cfg.mode, so cfg must already be attached.
	target.cfg.topic = target.topicPrefix() + topic
	target.cfg.hosts = hosts
	target.producer = NewPusher(hosts, target.cfg.topic)

	return target
}

// 按组注册消费handle
func (k *defaultTargetKafka) RegConsumer(group string, consume ConsumeHandle) {
	if len(k.cfg.hosts) == 0 {
		panic("无可用的节点信息")
	}

	kCfg := Conf{
		Brokers:    k.cfg.hosts,
		Topic:      k.cfg.topic,
		Group:      group,
		Offset:     "last",
		Conns:      1,
		Consumers:  8,
		Processors: 8,
		MinBytes:   10240,
		MaxBytes:   10485760,
	}
	kCfg.Log.Mode = k.cfg.logMode
	kCfg.Log.Level = k.cfg.logLevel

	if _, ok := k.consumers[k.cfg.topic]; !ok {
		k.consumers[k.cfg.topic] = map[string]queue.MessageQueue{}
	}

	if _, ok := k.consumers[k.cfg.topic][group]; ok {
		panic(fmt.Sprintf("相同的group:%s", group))
	}

	k.consumers[k.cfg.topic][group] = MustNewQueue(kCfg, WithHandle(consume))
}

// Send JSON-encodes v and pushes it to the configured topic, retrying the
// push via tools.DoWithRetry. It panics when no broker hosts are configured
// and returns any marshal or (final) push error.
func (k *defaultTargetKafka) Send(v interface{}) error {
	if len(k.cfg.hosts) == 0 {
		panic("无可用的节点信息")
	}

	body, err := json.Marshal(v)
	if err != nil {
		return err
	}

	// DoWithRetry already yields the final error; return it directly instead
	// of re-checking and re-returning it.
	return tools.DoWithRetry(func() error {
		return k.producer.Push(body)
	})
}

// SendWithKey JSON-encodes v and pushes it to the configured topic with the
// given partition key, retrying the push via tools.DoWithRetry. It panics
// when no broker hosts are configured and returns any marshal or (final)
// push error.
func (k *defaultTargetKafka) SendWithKey(key string, v interface{}) error {
	if len(k.cfg.hosts) == 0 {
		panic("无可用的节点信息")
	}

	body, err := json.Marshal(v)
	if err != nil {
		return err
	}

	// DoWithRetry already yields the final error; return it directly instead
	// of re-checking and re-returning it.
	return tools.DoWithRetry(func() error {
		return k.producer.PushWithKey(tools.StrToBytes(key), body)
	})
}

// Start launches every registered consumer queue, each in its own goroutine.
// NOTE(review): the goroutines are not tracked; shutdown relies on Stop()
// stopping each queue.
func (k *defaultTargetKafka) Start() {
	for _, groups := range k.consumers {
		for _, mq := range groups {
			go mq.Start()
		}
	}
}

// Stop synchronously shuts down every registered consumer queue.
func (k *defaultTargetKafka) Stop() {
	for _, groups := range k.consumers {
		for _, mq := range groups {
			mq.Stop()
		}
	}
}

// topicPrefix returns the topic-name prefix for the configured mode:
// a per-user prefix for dev, "test_"/"pre_" for test/pre, and no prefix
// for pro. It panics on any other mode.
func (k *defaultTargetKafka) topicPrefix() string {
	switch k.cfg.mode {
	case "dev":
		u, err := user.Current()
		if err != nil {
			// No OS user available: fall back to a random prefix.
			return stringx.Rand() + "_"
		}
		// NOTE(review): on Windows, Username is "DOMAIN\user", so index 0 is
		// the domain, not the user — confirm this is the intended prefix.
		return strings.Split(u.Username, "\\")[0] + "_"
	case "test":
		return "test_"
	case "pre":
		return "pre_"
	case "pro":
		return ""
	default:
		panic("环境变量有问题")
	}
}
