package mq

import (
	"github.com/IBM/sarama"
	"strings"
)

// kafka bundles the broker address list and the topic currently being
// operated on; methods below build short-lived sarama clients/admins
// from this configuration.
type kafka struct {
	host  []string // broker addresses, split from the comma-separated config string
	topic string   // target topic; set via Topic() before topic-scoped calls
}

// NewKafka builds a kafka helper from a comma-separated list of broker
// addresses (e.g. "host1:9092,host2:9092"). Call Topic() afterwards to
// select the topic for topic-scoped operations.
func NewKafka(hostConfig string) *kafka {
	// Fix: parameter was previously misspelled "hostCofig".
	return &kafka{
		host: strings.Split(hostConfig, ","),
	}
}

// Topic records the topic name for subsequent operations and returns
// the receiver so calls can be chained, e.g. NewKafka(h).Topic("t").
func (p *kafka) Topic(topic string) *kafka {
	p.topic = topic
	return p
}

// TopicList connects to the cluster and returns the names of all
// topics known to the brokers.
func (p *kafka) TopicList() ([]string, error) {
	cfg := sarama.NewConfig()
	cfg.Consumer.Return.Errors = true

	cli, err := sarama.NewClient(p.host, cfg)
	if err != nil {
		return nil, err
	}
	defer cli.Close()

	return cli.Topics()
}

// CreateTopic creates the currently selected topic with the given
// number of partitions and replication factor.
func (p *kafka) CreateTopic(numPartitions int32, replicationFactor int16) error {
	cfg := sarama.NewConfig()

	admin, err := sarama.NewClusterAdmin(p.host, cfg)
	if err != nil {
		return err
	}
	defer admin.Close()

	detail := &sarama.TopicDetail{
		NumPartitions:     numPartitions,     // partition count
		ReplicationFactor: replicationFactor, // replica count
	}
	// validateOnly=false: actually create the topic on the brokers.
	return admin.CreateTopic(p.topic, detail, false)
}

// DeleteTopic removes the currently selected topic from the cluster.
func (p *kafka) DeleteTopic() error {
	cfg := sarama.NewConfig()

	admin, err := sarama.NewClusterAdmin(p.host, cfg)
	if err != nil {
		return err
	}
	defer admin.Close()

	return admin.DeleteTopic(p.topic)
}

// GetPartitions returns the partition IDs of the currently selected
// topic, or nil if the cluster cannot be reached or the lookup fails.
func (p *kafka) GetPartitions() []int32 {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	// Fix: the connection error was previously discarded, so a failed
	// NewClient left client == nil and the deferred Close panicked.
	client, err := sarama.NewClient(p.host, config)
	if err != nil {
		return nil
	}
	defer client.Close()

	// Fix: the previous ClusterAdminFromClient was never used, and its
	// deferred Close also closed the shared client a second time.
	partitions, err := client.Partitions(p.topic)
	if err != nil {
		return nil
	}
	return partitions
}

// GetOffsetByPartitions returns the newest (next-to-be-written) offset
// of the given partition of the current topic, or 0 if the cluster is
// unreachable or the lookup fails.
func (p *kafka) GetOffsetByPartitions(PartitionsId int32) int64 {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	// Fix: the connection error was previously discarded, so a failed
	// NewClient left client == nil and the deferred Close panicked.
	client, err := sarama.NewClient(p.host, config)
	if err != nil {
		return 0
	}
	defer client.Close()

	latestOffset, err := client.GetOffset(p.topic, PartitionsId, sarama.OffsetNewest)
	if err != nil {
		return 0
	}
	return latestOffset
}

// CreatePartitions grows the currently selected topic to the given
// total partition count.
func (p *kafka) CreatePartitions(numPartitions int32) error {
	cfg := sarama.NewConfig()

	admin, err := sarama.NewClusterAdmin(p.host, cfg)
	if err != nil {
		return err
	}
	defer admin.Close()

	// nil assignment lets the brokers place the new partitions;
	// validateOnly=false applies the change for real.
	return admin.CreatePartitions(p.topic, numPartitions, nil, false)
}

// Publish synchronously sends msg to the current topic and returns the
// partition and offset the message was written to.
func (p *kafka) Publish(msg string) (int32, int64, error) {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas to ack
	config.Producer.Retry.Max = 5
	config.Producer.Return.Successes = true // required by SyncProducer

	producer, clientErr := sarama.NewSyncProducer(p.host, config)
	if clientErr != nil {
		return 0, 0, clientErr
	}
	defer producer.Close()

	message := &sarama.ProducerMessage{
		Topic: p.topic,
		Value: sarama.StringEncoder(msg),
	}
	partition, offset, err := producer.SendMessage(message)
	if err != nil {
		// Fix: previously returned nil here, silently swallowing send failures.
		return 0, 0, err
	}
	return partition, offset, nil
}

// GetGroupInfo reports, for each partition of the current topic, the
// given consumer group's committed offset alongside the partition's
// newest offset (useful for computing consumer lag).
func (p *kafka) GetGroupInfo(groupId string) ([]map[string]interface{}, error) {
	config := sarama.NewConfig()
	admin, clientErr := sarama.NewClusterAdmin(p.host, config)
	if clientErr != nil {
		return nil, clientErr
	}
	defer admin.Close()

	// Fix: the error was previously ignored; a failed call returns a nil
	// response and the Blocks access below would panic.
	offsets, err := admin.ListConsumerGroupOffsets(groupId, map[string][]int32{p.topic: p.GetPartitions()})
	if err != nil {
		return nil, err
	}

	var data []map[string]interface{}
	for topicInfo, partitionOffsets := range offsets.Blocks {
		for partition, offset := range partitionOffsets {
			data = append(data, map[string]interface{}{
				"groupId":       groupId,
				"topic":         topicInfo,
				"partition":     partition,
				"consumeOffset": offset.Offset,
				"newOffset":     p.GetOffsetByPartitions(partition),
			})
		}
	}
	return data, nil
}
