package main

import (
	"bytes"
	"encoding/json"
	"github.com/IBM/sarama"
	"github.com/sirupsen/logrus"
	"kakfa-lag-monitory/config"
	"log"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// LagRecord tracks consecutive over-threshold lag observations for a single
// topic within one consumer group. The zero value (topicName == "") is used
// throughout this file as the "no record yet" sentinel.
type LagRecord struct {
	topicName string // topic being tracked; "" marks an absent/zero record
	count     int    // consecutive polls over threshold; decremented when back under
	startTime string // wall-clock time ("15:04:05") when the lag was first observed
}

// addLagRecord builds a fresh LagRecord for topicName with a count of 1,
// stamped with the current wall-clock time in "15:04:05" format.
func addLagRecord(topicName string) LagRecord {
	return LagRecord{
		topicName: topicName,
		count:     1,
		startTime: time.Now().Format("15:04:05"),
	}
}

// lagMap is not referenced anywhere in the visible code; per-topic records
// are kept in groupLagMap instead. NOTE(review): looks like dead state left
// over from a pre-consumer-group version — confirm no other file uses it
// before removing.
var lagMap map[string]LagRecord

// groupLagMap maps consumer group -> (topic -> LagRecord). Initialized in
// main and mutated only by alarmNotice; the polling loop is single-goroutine,
// so no locking is used.
var groupLagMap map[string]map[string]LagRecord

// main polls each configured consumer group on a fixed interval and logs /
// alerts on Kafka consumer lag. Runs until the process is killed.
func main() {
	log.Print("欢迎使用kafka消息堆积监控")
	groupLagMap = make(map[string]map[string]LagRecord)
	brokers := strings.Split(config.GetConfig().Kafka.Brokers, ",")
	consumerGroups := strings.Split(config.GetConfig().Kafka.ConsumerGroup, ",")
	// Periodic poll driven by Alarm.PollSecond; release the ticker if main
	// ever returns (idiomatic even though this loop normally never exits).
	ticker := time.NewTicker(time.Second * time.Duration(config.GetConfig().Alarm.PollSecond))
	defer ticker.Stop()
	// One statistics pass per tick, covering every configured group.
	for range ticker.C {
		for _, group := range consumerGroups {
			logrus.Printf("%s-->开始统计", group)
			monitorKafka(brokers, group)
			logrus.Printf("%s-->统计完成", group)
			logrus.Printf("*************************")
		}
	}
}

// monitorKafka connects to the given brokers, computes the total lag
// (newest broker offset minus committed consumer offset, summed over all
// partitions) for every topic under consumerGroup, logs it, and forwards it
// to alarmNotice. Errors are logged and skipped so one bad topic/partition
// or a transient broker failure cannot kill the whole monitor process
// (the original used logrus.Fatalf, which calls os.Exit).
func monitorKafka(brokers []string, consumerGroup string) {
	saramaConfig := sarama.NewConfig()
	saramaConfig.Consumer.Return.Errors = true
	// Connect to the Kafka cluster.
	client, err := sarama.NewClient(brokers, saramaConfig)
	if err != nil {
		logrus.Errorf("连接kafka失败: %v", err)
		return
	}
	defer client.Close()

	manager, err := sarama.NewOffsetManagerFromClient(consumerGroup, client)
	if err != nil {
		// Previously this error was silently overwritten by client.Topics().
		logrus.Errorf("创建offset manager失败: %v", err)
		return
	}
	defer manager.Close()

	topics, err := client.Topics()
	if err != nil {
		logrus.Errorf("获取topic列表失败: %v", err)
		return
	}
	for _, topicName := range topics {
		// Skip Kafka's internal offsets topic.
		if topicName == "__consumer_offsets" {
			continue
		}
		topicPartitions, err := client.Partitions(topicName)
		if err != nil {
			logrus.Errorf("获取topic[%s]分区失败: %v", topicName, err)
			continue
		}
		var lagSum int64
		for _, partitionID := range topicPartitions {
			// Newest (log-end) offset for this topic/partition.
			currentOffset, err := client.GetOffset(topicName, partitionID, sarama.OffsetNewest)
			if err != nil {
				logrus.Errorf("无法获取topic[%s]和分区[%d]的当前偏移量", topicName, partitionID)
				continue
			}
			partitionOffsetManager, err := manager.ManagePartition(topicName, partitionID)
			if err != nil {
				logrus.Errorln(err)
				continue
			}
			// Last committed consumer offset; -1 means nothing committed yet,
			// which we count as zero backlog (matches original behavior).
			nextOffset, metadata := partitionOffsetManager.NextOffset()
			if metadata != "" {
				logrus.Println(metadata)
			}
			// Release the per-partition manager; it was leaked before.
			partitionOffsetManager.Close()
			var backlog int64
			if nextOffset != -1 {
				backlog = currentOffset - nextOffset
			}
			lagSum += backlog
		}
		logrus.Printf("topic(%s),数据堆积量：%d", topicName, lagSum)
		alarmNotice(int(lagSum), topicName, consumerGroup)
	}
}

// alarmNotice updates the per-group lag state machine for topicName and
// fires the webhook once a topic has been over Alarm.Threshold for
// Alarm.Count consecutive polls. Going back under the threshold decrements
// the counter; after the webhook fires, the counter resets to 0 so the
// alert can re-arm.
func alarmNotice(lagSum int, topicName string, consumerGroup string) {
	topicRecords := groupLagMap[consumerGroup]
	if topicRecords == nil {
		topicRecords = make(map[string]LagRecord)
	}
	record := topicRecords[topicName]
	overThreshold := lagSum > config.GetConfig().Alarm.Threshold

	switch {
	case overThreshold && record.topicName == "":
		// First over-threshold observation: start a new record at count 1.
		record = addLagRecord(topicName)
		topicRecords[topicName] = record
	case overThreshold:
		// Still over threshold: bump the consecutive counter.
		record.count++
		topicRecords[topicName] = record
	case record.topicName != "" && record.count > 0:
		// Back under threshold: cool the counter down by one.
		record.count--
		topicRecords[topicName] = record
	}

	// No record means the threshold has never been crossed for this topic.
	if record.topicName == "" {
		return
	}
	// Counter reached the configured consecutive count: alert and re-arm.
	if record.count == config.GetConfig().Alarm.Count {
		logrus.Println("符合告警条件，执行webhook")
		execWebHook(lagSum, record, consumerGroup)
		record.count = 0
		topicRecords[topicName] = record
	}
	groupLagMap[consumerGroup] = topicRecords
}

// execWebHook renders the alarm notice template (falling back to a built-in
// Chinese template) and POSTs it as JSON {"msg": ..., "appid": ...} to the
// configured webhook URL. Each {placeholder} is substituted once, in order.
func execWebHook(lagSum int, lagRecord LagRecord, consumerGroup string) {
	s := config.GetConfig().Alarm.NoticeTemplate
	if s == "" {
		s = "kafka名称：{kafka_name}数据堆积 \nbrokers：{brokers} \n消费组：[{consumer_group}] \n监听的topic：{topic} \n发生时间：{start_time}-{end_time} \n触发值：{alarm_threshold} \n累计次数：{count} \n最后一次堆积量：{lag_count}"
	}
	// Replace each placeholder once (count 1), preserving any literal
	// duplicate placeholders a user template might contain.
	msg := strings.Replace(s, "{kafka_name}", config.GetConfig().Kafka.KafkaName, 1)
	msg = strings.Replace(msg, "{brokers}", config.GetConfig().Kafka.Brokers, 1)
	msg = strings.Replace(msg, "{consumer_group}", consumerGroup, 1)
	msg = strings.Replace(msg, "{topic}", lagRecord.topicName, 1)
	msg = strings.Replace(msg, "{alarm_threshold}", strconv.Itoa(config.GetConfig().Alarm.Threshold), 1)
	msg = strings.Replace(msg, "{start_time}", lagRecord.startTime, 1)
	msg = strings.Replace(msg, "{end_time}", time.Now().Format("15:04:05"), 1)
	msg = strings.Replace(msg, "{count}", strconv.Itoa(config.GetConfig().Alarm.Count), 1)
	msg = strings.Replace(msg, "{lag_count}", strconv.Itoa(lagSum), 1)
	logrus.Printf("告警信息：%s", msg)
	// Call the webhook.
	data := map[string]interface{}{"msg": msg, "appid": config.GetConfig().Appid}
	jsonStr, e := json.Marshal(data)
	if e != nil {
		// Previously ignored; a marshal failure would have sent an empty body.
		logrus.Println("请求webhook失败:", e)
		return
	}
	request, e := http.NewRequest(http.MethodPost, config.GetConfig().Alarm.Webhook, bytes.NewBuffer(jsonStr))
	if e != nil {
		logrus.Println("请求webhook失败:", e)
		return
	}
	request.Header.Set("Content-Type", "application/json")
	// Timeout prevents a hung webhook endpoint from blocking the poll loop
	// forever (the original client had no deadline at all).
	client := &http.Client{Timeout: 10 * time.Second}
	response, e := client.Do(request)
	if e != nil {
		logrus.Println("请求webhook失败:", e)
		return
	}
	defer response.Body.Close()
	// response.Status is already a string; no conversion needed.
	logrus.Print("响应结果:", response.Status)
}
