# /usr/local/python36/bin/python3.6
# -*- coding:utf-8 -*-

import time, configparser, sys, os
from kafka.client import KafkaClient
from kafka.protocol.commit import OffsetFetchRequest_v1, OffsetFetchRequest_v2, OffsetFetchResponse_v2, \
    OffsetFetchResponse_v1, OffsetFetchRequest_v0, \
    OffsetFetchResponse_v0
from kafka.protocol.offset import OffsetRequest_v0, OffsetResponse_v0
from tool.sendsns import send_sns
from tool.sendemail import send_email
import logging, logging.handlers, logging.config, sys, json
from pymongo import MongoClient
from datetime import datetime
from logging.handlers import RotatingFileHandler



# --- Module-level state shared by all helpers below ---
servers = ''  # Kafka bootstrap servers, e.g. "host1:9092,host2:9092" (fill in before use)
qq=''  # email recipient placeholder; email sending is currently commented out
time_interval = 60  # seconds to sleep between monitoring rounds
history_time_interval = 60 * 60 * 60  # NOTE(review): 216000s (60h?) — unused in this file, confirm intent
duration = 0  # total runtime in seconds, incremented by the main loop
mongodb_con = None  # raw MongoClient connection (assigned in __main__)
mongodb_client = None  # handle to the "kafkadb" database (assigned in __main__)
client = None  # KafkaClient instance (assigned in __main__)
conn = None  # unused placeholder
partition_cache = {}  # topic -> list of partition ids, filled lazily by get_partitions()
brokers_cache = []  # broker nodeIds, filled lazily by get_brokers()
kafka_type = []  # group ids found (at runtime) to store offsets in Kafka
zk_type = []  # group ids that store offsets in ZooKeeper (never appended here — see optionnal_send)
beensent = {}  # alarm dedup map: key -> lag value for alarms already sent


class myconf(configparser.ConfigParser):
    """ConfigParser subclass that keeps option names case-sensitive.

    The stock ConfigParser lower-cases every option name via optionxform;
    overriding it with an identity transform preserves the original casing
    of keys read from kafka.ini.
    """

    def optionxform(self, option_name):
        # Identity transform: hand the option name back unchanged.
        return option_name


# Path to the monitoring configuration (consumer groups, topics, lag thresholds).
config = '/usr/local/python36/conf/monitoring_kafka/kafka.ini'
kafkaconf = myconf()
kafkaconf.read(config)
# Per-topic high-water cache of the committed offset for partition 0,
# used by get_offsets() to ignore stale async poll results.
all_kafka_office = {}

# SMS/alert message templates.
# NOTE(review): the variable names look swapped relative to their wording
# ("monitoring_fault" carries the recovery text 恢复 and vice versa), but
# send_s() pairs them consistently with its own log messages, so runtime
# behavior is coherent; renaming would touch every call site.
monitoring_fault = "服务器：kafka 恢复：kafka lag积累过多故障！问题详情：%s lag 堆积量 %s"
monitoring_restore = "服务器：kafka 发生：kafka lag积累过多故障！问题详情：%s lag 堆积量 %s"

# Fix: the original path "..\logging.conf" used a Windows separator (and
# "\l" is an invalid escape sequence); this script targets a Unix host per
# its shebang, so use a forward slash.
logging.config.fileConfig("../logging.conf")
logger = logging.getLogger("KAFKA")


def send_s(gk, tk, la, status):
    """Compare lag against the configured threshold; send or clear alarms.

    `beensent` is the dedup map: a key being present means the "fault"
    alarm was already sent and we are waiting for recovery.

    Args:
        gk: consumer group id (not used in the alarm text; kept for callers).
        tk: topic name — also the option key in kafka.ini's [lagpolice] section.
        la: current lag (logsize - committed offset).
        status: scratch dict supplied by the caller; tk -> la is stored then iterated.
    """
    status[tk] = la
    for k, v in status.items():
        # Per-topic lag threshold from the [lagpolice] ini section.
        value = int(kafkaconf.get('lagpolice', k))
        try:
            if beensent[k]:
                # Fix: original tested `value < v` (lag still ABOVE threshold)
                # as the recovery condition, clearing and re-raising the alarm
                # every cycle while lag stayed high. Recovery is when lag has
                # dropped back under the threshold.
                if value > v:
                    send_monitoring_fault = monitoring_fault % (k, v)
                    logger.error("恢复故障: " + send_monitoring_fault)
                    send_sns(send_monitoring_fault)
                    # for qq in qqemail:
                    #     send_email(qq, send_monitoring_fault)
                    beensent.pop(k)
                else:
                    # Fix: was logger.dubug — an AttributeError the old bare
                    # `except:` swallowed, which then re-sent the alarm below.
                    logger.debug("===============已经发送报警，无需再次发送==============")
        except KeyError:
            # First time this key crosses the threshold: record it and alarm.
            if value <= v:
                beensent[k] = v
                send_monitoring_restore = monitoring_restore % (k, v)
                logger.info("发生故障" + send_monitoring_restore)
                send_sns(send_monitoring_restore)
                # send_email(qq, send_monitoring_restore)


def get_brokers():
    """Return the cached broker node ids, populating the cache on first use."""
    if brokers_cache:
        return brokers_cache
    discovered = client.cluster.brokers()
    if discovered:
        brokers_cache.extend(b.nodeId for b in discovered)
    return brokers_cache


def get_partitions(topic):
    """Return the cached partition ids for *topic*, fetching them once.

    Returns [] (without caching) when the cluster reports no available
    partitions, so a later call will retry the lookup.
    """
    if topic in partition_cache:
        return partition_cache[topic]
    available = client.cluster.available_partitions_for_topic(topic)
    if not available:
        return []
    partition_cache[topic] = list(available)
    return partition_cache[topic]


def append(rdict, pdict):
    """Merge *pdict* into *rdict* in place.

    Values on shared keys are summed; new keys are copied over. When
    *rdict* starts empty this degenerates to a plain bulk copy.
    """
    if not rdict:
        rdict.update(pdict)
        return
    for key, value in pdict.items():
        if key in rdict:
            rdict[key] += value
        else:
            rdict[key] = value


def parse_logsize(t, p, responses):
    """Extract {partition: logsize} for topic *t*, partition *p*.

    Only the first (topic, partition_list) pair of each response is
    inspected. Returns {} when any response has an unexpected type, or
    when no response matches (t, p) with error_code == 0.
    """
    for resp in responses:
        if not isinstance(resp, OffsetResponse_v0):
            return {}
        topic_name = resp.topics[0][0]
        # entry = (partition, error_code, [offsets]); async poll results may
        # belong to a different topic/partition, so match all three fields.
        entry = resp.topics[0][1][0]
        if topic_name == t and entry[0] == p and entry[1] == 0:
            return {entry[0]: entry[2][0]}
    return {}


def parse_offsets(t, responses):
    """Collect {partition: committed_offset} for topic *t* from *responses*.

    Entries with a non-zero error_code (tuple index 3) are skipped.
    Returns {} immediately if any response has an unexpected type,
    discarding whatever was collected so far (original behavior).
    """
    result = {}
    for resp in responses:
        if not isinstance(resp, (OffsetFetchResponse_v1, OffsetFetchResponse_v0)):
            return {}
        topic_name = resp.topics[0][0]
        if topic_name != t:
            continue
        for entry in resp.topics[0][1]:
            # entry = (partition, offset, metadata, error_code)
            if entry[3] == 0:
                result[entry[0]] = entry[1]
    return result


def get_logsize():
    """Query every broker for the latest logsize of each monitored topic.

    Returns {topic: {partition: logsize}}. Relies on the module-level
    `topics` collection populated by get_offsets().
    """
    result = {}
    brokers = get_brokers()
    for topic in topics:
        per_partition = {}
        partitions = get_partitions(topic)
        for broker in brokers:
            for partition in partitions:
                request = OffsetRequest_v0(
                    replica_id=-1, topics=[(topic, [(partition, -1, 1)])])
                client.send(broker, request)
                append(per_partition, parse_logsize(topic, partition, client.poll()))
        result[topic] = per_partition
    return result


def get_offsets():
    """Fetch committed offsets for every consumer group/topic in kafka.ini.

    Reads the [consumers] section (group_id -> "topicA|topicB"), queries
    each broker, and returns {group_id: {topic: {partition: offset}}}.

    Side effects: rebuilds the module-level `topics` collection (topic
    names, deduplicated — consumed later by get_logsize()) and updates
    all_kafka_office, a per-topic high-water cache for partition 0's
    committed offset.
    """
    global topics
    gd = {}
    # Option names in the [consumers] section are the consumer group ids.
    con = kafkaconf.options('consumers')
    topics3 = []
    for gid in con:
        # Topics for this group are "|"-separated in the ini value.
        topics1 = kafkaconf.get('consumers', gid)
        topics2 = topics1.split('|')
        td = {}
        for topic in topics2:
            topics3.append(topic)
            pd = {}
            for broker in get_brokers():
                partitions = get_partitions(topic)
                if not partitions:
                    # No partition metadata yet: abort this whole round.
                    return {}
                else:
                    responses = optionnal_send(broker, gid, topic, partitions)
                    da = parse_offsets(topic, responses)
                    dr = {}
                    if da != {}:
                        # NOTE(review): only partition 0 is considered (da[0]);
                        # a topic whose parse result lacks partition 0 would
                        # raise KeyError here — confirm whether single-partition
                        # topics are assumed.
                        try:
                            # Keep the highest offset seen so far; async polls
                            # can return stale (lower) values.
                            if all_kafka_office[topic] < da[0]:
                                all_kafka_office[topic] = da[0]
                                dr[0] = all_kafka_office[topic]
                        except:
                            # First sighting of this topic (KeyError on the
                            # cache lookup): seed the cache and pass the parsed
                            # offsets through unchanged.
                            all_kafka_office[topic] = da[0]
                            dr = da
                    append(pd, dr)
            td[topic] = pd  # e.g. {'alarmsend': {0: 10}}: partition 0 -> offset 10
        gd[gid] = td
    # Deduplicate topic names while preserving insertion order.
    topics = {}.fromkeys(topics3).keys()
    return gd


# Loop over every (group, topic) pair — superseded reference implementation, kept commented out for comparison
# def get_offsets():
#     gd = {}
#     for gid in monitor_group_ids:
#         print('haha',2)
#         td = {}
#         for topic in topics:
#             pd = {}
#             for broker in get_brokers():
#                 partitions = get_partitions(topic)
#                 if not partitions:
#                     return {}
#                 else:
#                     responses = optionnal_send(broker, gid, topic, partitions)
#                     dr = parse_offsets(topic, responses)
#                     append(pd, dr)
#             td[topic] = pd
#             print(td)
#         gd[gid] = td
#     return gd


def optionnal_send(broker, gid, topic, partitions):
    """Dispatch an offset-fetch for *gid* to its known storage backend.

    Groups already classified go straight to the matching sender. Unknown
    groups are probed with the Kafka-backed request; when the reply holds
    a usable offset the group is remembered in kafka_type so later rounds
    skip the probe.
    """
    if gid in kafka_type:
        return kafka_send(broker, gid, topic, partitions)
    if gid in zk_type:
        return zk_send(broker, gid, topic, partitions)
    responses = kafka_send(broker, gid, topic, partitions)
    if is_suitable(parse_offsets(topic, responses)):
        kafka_type.append(gid)
    return responses


def is_suitable(dct):
    """Return True if any parsed offset differs from -1, otherwise None.

    -1 means "no committed offset": a dict of only -1 values (or an empty
    dict) indicates the probe found nothing usable. The implicit None on
    the all-(-1) path is preserved (callers only truth-test the result).
    """
    for offset in dct.values():
        if offset != -1:
            return True
    return None


def kafka_send(broker, gid, topic, partitions):
    """Fetch Kafka-stored committed offsets (v1 API) for *gid*/*topic* from *broker*."""
    request = OffsetFetchRequest_v1(consumer_group=gid, topics=[(topic, partitions)])
    client.send(broker, request)
    return client.poll()


def zk_send(broker, gid, topic, partitions):
    """Fetch ZooKeeper-stored committed offsets (v0 API) for *gid*/*topic* from *broker*."""
    request = OffsetFetchRequest_v0(consumer_group=gid, topics=[(topic, partitions)])
    client.send(broker, request)
    return client.poll()

def write_mongodb(gk, tk, pk, lo, of, la):
    """Insert one lag-status document into kafkadb.topicstatus.

    Args:
        gk: consumer group id.
        tk: topic name.
        pk: partition id.
        lo: current logsize (head offset).
        of: committed consumer offset.
        la: lag (lo - of).
    """
    document = {
        "Group": gk,
        "Topic": tk,
        "Partition": pk,
        "Logsize": lo,
        "Offset": of,
        "Lag": la,
        "datetime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
    }
    mongodb_client.topicstatus.insert_one(document)

def do_task():
    """Run one monitoring round.

    For every (group, topic, partition) with both a committed offset and a
    logsize, compute lag = logsize - offset, evaluate the alarm threshold
    (skipping partitions with no committed offset, i.e. offset == -1),
    persist the sample to MongoDB, and log it.
    """
    offsets = get_offsets()
    logsizes = get_logsize()
    for group, topic_map in offsets.items():
        for topic, partition_map in topic_map.items():
            if not logsizes or topic not in logsizes:
                continue
            sizes = logsizes[topic]  # partition -> logsize
            for partition, committed in partition_map.items():
                if not sizes or partition not in sizes:
                    continue
                lag = sizes[partition] - committed
                if committed != -1:
                    # Fresh scratch dict per partition, as the original did.
                    send_s(group, topic, lag, {})
                write_mongodb(group, topic, partition, sizes[partition], committed, lag)
                stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                logger.info(
                    "Group:%s, Topic:%s, Partition:%s, Logsize:%s, Offset:%s, Lag:%s, datetime:%s"
                    % (group, topic, partition, sizes[partition], committed, lag, stamp))

if __name__ == "__main__":
   try:
       client = KafkaClient(bootstrap_servers=servers, request_timeout_ms=3000)
       mongodb_con = mongo_client = MongoClient('mongodb://用户名:密码@地址:端口/集合名称?')
       mongodb_client =  mongo_client.kafkadb
       while True:
           do_task()
           time.sleep(time_interval)
           duration += time_interval
   except:
       logger.error("连接kafka失败")
#     client = KafkaClient(bootstrap_servers=servers, request_timeout_ms=3000)
#     mongodb_con = mongo_client = MongoClient('mongodb://用户名:密码@地址:端口/集合名称?')
#     mongodb_client =  mongo_client.kafkadb
#     while True:
#         do_task()
#         time.sleep(60)
        #duration += time_interval
