#!/usr/bin/env python
# encoding: utf-8

"""
@version: 0.1
@author: Kouen
@license: Apache Licence
@email: jobkouen@outlook.com
@software: PyCharm Community Edition
@file: WrongLogic.py
@time: 2017/3/30 上午9:54
@简介:
    1. 预警逻辑判断
    2. 文件名称修改
"""
from functools import reduce
import CacheMemroy
import SimpleMonitorConfig
import DevConst as Const
from optparse import OptionParser
import json
import re
import requests
import os
import datetime
import time
from glob import glob
from threading import Thread
from requests.auth import HTTPBasicAuth


def notice(msg):
    """Send an alarm message via WeChat and append it to today's notice log.

    The log file is ./Notice/YYYY-MM-DD, one message per line.
    """
    send_msg(msg)
    filename = datetime.datetime.now().strftime("%Y-%m-%d")
    # Append with native file I/O instead of `os.system('echo "%s" >> …')`,
    # which broke (and was shell-injectable) whenever msg contained quotes,
    # backticks or `$`.  open(..., 'a') also creates the file, replacing the
    # separate `touch` call; makedirs covers a missing ./Notice directory.
    os.makedirs('./Notice', exist_ok=True)
    with open('./Notice/%s' % filename, 'a') as fp:
        fp.write('%s\n' % msg)


def get_token():
    """Fetch a WeChat-Work API access token using the corp credentials
    supplied on the command line (options.user / options.passwd)."""
    credentials = {
        'corpid': options.user,
        'corpsecret': options.passwd
    }
    resp = requests.post('https://qyapi.weixin.qq.com/cgi-bin/gettoken',
                         params=credentials)
    return json.loads(resp.text)["access_token"]


def send_msg(msg):
    """Broadcast a plain-text message to all users/parties of the WeChat-Work app."""
    payload = {
        "touser": "@all",
        "toparty": "@all",
        "msgtype": "text",
        "agentid": int(options.appid),
        "text": {
            "content": msg
        },
        "safe": 0
    }
    endpoint = "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="+get_token()
    # ensure_ascii=False keeps the Chinese text readable; encode explicitly.
    requests.post(endpoint, data=json.dumps(payload, ensure_ascii=False).encode("UTF-8"))


@CacheMemroy.cacheByPickle({'cache': ('./Cache/L1Wrong', set)})
def level1_suspend(**kwargs):
    """Level-1 alarm: report streaming apps with too many backlogged jobs.

    kwargs:
        task_list: list of app dicts with 'app_name' and 'active' job tuples.
        cache: set of app names already alarmed (injected by the pickle-cache
               decorator) so each app is reported once until it recovers
               below SafeSuspend.
    Returns {'ret': [messages], 'cache': updated set}.
    """
    error_msg = list()
    ready_apps = kwargs['cache']

    for app_info in kwargs['task_list']:
        name = app_info['app_name']
        actives = app_info['active']
        suspend_count = 0

        process_per = 0
        process_count = 0
        for job_id, status, schedule, commit_time, event, run_time in actives:
            if "Suspend" in status:
                suspend_count += 1
            elif "Process" in status:
                process_count += 1
                process_per += schedule

        if suspend_count+process_count > SimpleMonitorConfig.SuspendMax and name not in ready_apps:
            # Guard: when every backlogged job is suspended, process_count is 0
            # and the original `process_per/process_count` raised
            # ZeroDivisionError; report 0% progress instead.
            avg_per = process_per/process_count if process_count else 0
            error_msg.append(u"{Name}({P}%) Suspend:{N} Running:{N2}".format(
                Name=name,
                N=suspend_count,
                N2=process_count,
                P=avg_per
            ))
            ready_apps.add(name)
        if suspend_count+process_count < SimpleMonitorConfig.SafeSuspend:
            ready_apps -= {name}

    return {
        'ret': error_msg,
        'cache': ready_apps
    }


@CacheMemroy.cacheByPickle({'cache': ('./Cache/L2Wrong', dict)})
def level2_suspend(**kwargs):
    """Level-2 alarm: detect streaming jobs that are stuck or progressing too slowly.

    The cache keeps a per-app, per-job snapshot of schedule and timestamp.
    Once an app's observation window exceeds its configured MaxSleepTime, the
    schedule delta between the stored and current snapshot decides the alarm:
      * delta == 0 -> the job is considered dead;
      * 0 < delta < ScheduleMinActive -> too slow.
    At most one job per app is alarmed per pass (ready_handle_apps).

    kwargs: task_list (list of app dicts), cache ({app: {job_id: snapshot}}).
    Returns {'ret': messages, 'cache': updated history}.
    """
    error_msg = list()
    history_wrongs = kwargs['cache']
    ready_handle_apps = set()  # apps already handled this pass (one alarm each)
    now = int(time.time())

    for app_info in kwargs['task_list']:
        app_name = app_info['app_name']
        actives = app_info['active']
        # Fresh snapshot of the currently-active jobs, keyed by job id.
        new_actives = dict()
        for job_id, status, schedule, commit_time, event, run_time in actives:
            new_actives[job_id] = {
                "app_id": app_info['app_id'],
                "schedule": schedule,
                "commit_time": commit_time,
                "status": status,
                "current_time": now
            }

        # First sighting of the app: seed history with the fresh snapshot.
        history_wrongs.setdefault(app_name, new_actives)
        # history_wrongs[app_name] = new_actives
        # Add job ids that appeared since the stored snapshot.
        for job_id, job_info in new_actives.items():
            if job_id not in history_wrongs[app_name]:
                history_wrongs[app_name][job_id] = job_info

        for job_id, job_info in history_wrongs[app_name].items():
            # NOTE(review): jobs no longer active are skipped but never pruned
            # from history -- confirm the cache cannot grow unbounded.
            if job_id not in new_actives: continue
            # Seconds elapsed between the stored and current snapshot.
            t = int(new_actives[job_id]['current_time']) - int(job_info['current_time'])
            if app_name not in ready_handle_apps and \
               app_name in SimpleMonitorConfig.MaxSleepTime and \
               t > SimpleMonitorConfig.MaxSleepTime[app_name]:
                ready_handle_apps.add(app_name)
                # Progress made during the observation window.
                sche = int(new_actives[job_id]['schedule']) - int(job_info['schedule'])
                if not sche:
                    error_msg.append(
                        u"{Name}.{ID}.僵死, 堆积任务:{N}.".format(
                            Name=app_name,
                            N=len(new_actives),
                            ID=job_id.split('-')[-1]
                        ))
                    # if "wis3" in app_name or "wis2" in app_name:
                    #     kill_cmd = f"""ssh root@hadoop154 "yarn application -kill {new_actives[job_id]['app_id']}" """
                    #     p = os.popen(kill_cmd)
                    #     error_msg.append("执行kill命令完成: " + p.read())
                    #     error_msg.append("(假装)执行kill命令完成")
                    #     p.close()

                elif sche < SimpleMonitorConfig.ScheduleMinActive:
                    error_msg.append(u"{Name}: 进度增加({P}%), 堆积任务({N}).".format(
                        Name=app_name,
                        P=sche,
                        N=len(new_actives)
                    ))
                # Refresh the stored snapshot so the next window starts here.
                history_wrongs[app_name][job_id] = new_actives[job_id]
            # else:
            #     job_info["current_time"] = now

    return {
        "ret": error_msg,
        'cache': history_wrongs
    }


@CacheMemroy.cacheByPickle({'cache': ('./Cache/EventWrong', dict)})
def check_event_number(**kwargs):
    """Alarm when an app's recent event throughput drops relative to its history.

    Compares the average event count of the newest CheckEventHistoryLength
    completed batches against the average of the older ones; alarms (at most
    once per EventAlarmCycle seconds per app) when the ratio falls below
    EventMinPer percent.

    kwargs: task_list (list of app dicts), cache ({app: last alarm timestamp}).
    Returns {'ret': messages, 'cache': updated timestamps}.
    """
    error_msg = list()
    ready_events = kwargs['cache']

    for app_info in kwargs['task_list']:
        name = app_info['app_name']
        completed = app_info['completed']
        try:
            # Mean event count over the newest batches; column -2 is the
            # event count of a completed-batch tuple.
            new_event = reduce(
                lambda x, y: x+y*1.0,
                map(lambda x: int(x[-2]), completed[0:SimpleMonitorConfig.CheckEventHistoryLength])
            )/SimpleMonitorConfig.CheckEventHistoryLength
        except (Exception, ):
            new_event = 0

        try:
            # Mean over the older, historical batches.
            avg_event = reduce(
                lambda x, y: x+y*1.0,
                map(lambda x: int(x[-2]), completed[SimpleMonitorConfig.CheckEventHistoryLength:])
            )/(len(completed)-SimpleMonitorConfig.CheckEventHistoryLength)
        except (Exception, ):
            avg_event = new_event

        # Fix: avg_event can legitimately be 0 (e.g. both windows empty or all
        # zero counts), which previously raised ZeroDivisionError.  Also check
        # the cheap length/time-gap conditions before dividing; all conditions
        # are side-effect free, so reordering is safe.
        if avg_event and \
           len(completed) > SimpleMonitorConfig.CheckEventHistoryLength and \
           int(time.time()) - ready_events.get(name, 0) > SimpleMonitorConfig.EventAlarmCycle and \
           new_event/avg_event*100 < SimpleMonitorConfig.EventMinPer:
            error_msg.append(u"{APP}.Event数量为历史的{N}%.".format(
                APP=name,
                N=round(new_event/avg_event*100, 2)
            ))
            ready_events[name] = int(time.time())

    return {
        "ret": error_msg,
        "cache": ready_events
    }


def handle_streaming(**kwargs):
    """Load today's streaming snapshot and run every streaming check on it,
    collecting all alarm messages into one list."""
    date_parts = options.filename.split('-')
    snapshot_path = f'{SimpleMonitorConfig.DataDirect}/streaming/{"/".join(date_parts[0:4])}/{options.filename}'
    task_list = CacheMemroy.getByJsonLoad(snapshot_path)

    messages = level1_suspend(task_list=task_list)
    assert isinstance(messages, list)
    for check in (level2_suspend, check_event_number):
        messages.extend(check(task_list=task_list))
    return messages


@CacheMemroy.cacheByPickle({'cache': ("./Cache/RunningWrong", dict)})
def check_running_calc_time(**kwargs):
    """Alarm on non-streaming jobs running longer than their configured limit,
    and report when a previously-flagged job finally finishes.

    cache maps job id -> (start_time, app name) for flagged jobs so each job
    is alarmed once and its completion can be reported later.
    """
    ready_wrongs = kwargs['cache']

    error_msg = list()
    snapshot = kwargs['task_list']
    current_time = snapshot['time']

    runningJobs = set()
    for app in snapshot['apps']:
        if "root.streaming" in app:
            continue  # streaming jobs are long-lived by design

        info = snapshot['apps'][app]
        job_id = info[Const.ID]
        runningJobs.add(job_id)

        queue = app.split('.')[0]
        limits = SimpleMonitorConfig.RunningCalcMaxTime
        max_time = limits[queue] if queue in limits.keys() else limits['default']

        run_time = current_time - info[Const.START_TIME]
        # Threshold is max_time plus the configured percentage margin.
        if job_id not in ready_wrongs.keys() and run_time*100 > max_time*(
                100+SimpleMonitorConfig.RunningAlarmPerInTime):
            error_msg.append(u"⚠️{K}.{ID}运行: {M}m".format(
                K=queue,
                ID=job_id.split('_')[-1],
                M=int(run_time / 60)
            ))
            ready_wrongs[job_id] = (info[Const.START_TIME], info[Const.NAME])

    # Flagged jobs that are no longer in the snapshot have finished.
    for job_id in ready_wrongs.keys() - runningJobs:
        error_msg.append("✅{NAME}.{ID}.结束: {M}m".format(
            NAME=ready_wrongs[job_id][1],
            ID=job_id.split('_')[-1],
            M=int((int(time.time())-ready_wrongs[job_id][0]) / 60)
        ))
        ready_wrongs.pop(job_id)

    return {
        "ret": error_msg,
        "cache": ready_wrongs
    }


@CacheMemroy.cacheByPickle({
    "cache": ('./Cache/RunningStreamingWrong', dict),
    "WrongCache": ('./Cache/RunningStreamingWrongCache', set),
})
def check_running_streaming(**kwargs):
    """Alarm when an expected streaming app disappears, and report its return.

    kwargs (pickle-cache injected):
        cache: {app_name: timestamp of the previous scan that saw it missing}
        WrongCache: app names currently in the "down" alarm state
        task_list: {'apps': {queue_path: app_info, ...}}
    Returns {'ret': messages, 'WrongCache': ..., 'cache': ...}.
    """
    ready_wrongs = kwargs['cache']
    wrongCache = kwargs['WrongCache']

    error_msg = list()
    live_streaming = list()
    # Names of every streaming app present in the current snapshot.
    for app in kwargs['task_list']['apps']:
        if "streaming" in app:
            live_streaming.append(kwargs['task_list']['apps'][app][Const.NAME])

    # Apps that should be alive (per config) but are absent from the snapshot.
    lose_streaming = SimpleMonitorConfig.RunningStreamingLive - set(live_streaming)
    current_time = int(time.time())

    # Previously-alarmed apps that are no longer missing -> recovered.
    isOk = wrongCache - lose_streaming
    for name in lose_streaming:
        # NOTE(review): ready_wrongs[name] is refreshed on *every* pass below,
        # so this gap is only the interval since the previous scan, not the
        # accumulated downtime -- the alarm can only fire when scans are spaced
        # further apart than RunningStreamingLoseTime.  Confirm this is intended.
        if current_time - ready_wrongs.get(name, current_time) > SimpleMonitorConfig.RunningStreamingLoseTime and name not in wrongCache:
            error_msg.append(u"{N}挂掉!".format(N=name))
            wrongCache.add(name)
        ready_wrongs[name] = current_time

    for name in isOk:
        wrongCache.remove(name)
        error_msg.append(u"{N}回复正常!".format(N=name))
    return {
        "ret": error_msg,
        "WrongCache": wrongCache,
        "cache": ready_wrongs
    }


@CacheMemroy.cacheByPickle({"cache": ('./Cache/PreFinishExecuteTime', dict)})
def finishJob(**kwargs):
    """Track how long each non-streaming app's previous run took.

    For every app name the cache stores the currently-running job ID, its
    start time, and 'pre_execute_time' -- the duration (seconds) of the
    previous completed run.  Emits no alarms: 'ret' is always [].
    """
    finished = kwargs['cache']

    currentHasDataName = set()
    for app in kwargs['task_list']['apps']:
        if "root.streaming" in app: continue  # streaming jobs never "finish"
        appInfo = kwargs['task_list']['apps'][app]
        currentHasDataName.add(appInfo[Const.NAME])
        if not finished.get(appInfo[Const.NAME]):
            # First time this app name is seen: start tracking it.
            finished[appInfo[Const.NAME]] = {
                "ID": appInfo[Const.ID],
                "start_time": appInfo[Const.START_TIME],
                "pre_execute_time": 0
            }

        elif not finished[appInfo[Const.NAME]]['ID']:
            # Entry was cleared when the app vanished (bottom loop); re-attach.
            finished[appInfo[Const.NAME]]["ID"] = appInfo[Const.ID]
            finished[appInfo[Const.NAME]]["start_time"] = appInfo[Const.START_TIME]

        elif finished[appInfo[Const.NAME]]['ID'] != appInfo[Const.ID]:
            # A new job ID means the previous run finished between scans;
            # record how long it ran, from its recorded start until now.
            pre_execute_time = int(time.time()) - int(finished[appInfo[Const.NAME]]['start_time'])

            finished[appInfo[Const.NAME]]['pre_execute_time'] = pre_execute_time
            finished[appInfo[Const.NAME]]["ID"] = appInfo[Const.ID]
            finished[appInfo[Const.NAME]]["start_time"] = appInfo[Const.START_TIME]

        else:
            # Same ID: the current run has not finished yet; nothing to do.
            pass

    # Apps that disappeared from the snapshot entirely: close out their run.
    for appName in finished:
        if appName not in currentHasDataName and finished[appName]['start_time']:
            finished[appName]['pre_execute_time'] = int(time.time()) - int(finished[appName]['start_time'])
            finished[appName]['ID'] = None
            finished[appName]['start_time'] = None

    return {
        'ret': [],
        'cache': finished
    }


@CacheMemroy.cacheByPickle({
    "cache": ('./Cache/PreRunningTime', dict),
    "readyWrong": ("./Cache/PreRunningReadyWrong", set)
})
def PreRunningTime(**kwargs):
    """Alarm when a batch job reappears with a new ID after being away too long.

    cache maps app name -> {'start', 'end', 'id'} of the tracked run;
    readyWrong holds job IDs already alarmed so each is reported only once.
    Returns {'ret': messages, 'cache': ..., 'readyWrong': ...}.
    """
    preRunningTime = kwargs['cache']
    readyWrong = kwargs['readyWrong']
    error_msg = list()

    currentTime = int(time.time())

    running = set()
    for app in kwargs['task_list']['apps']:
        app_detail = kwargs['task_list']['apps'][app]
        # New app (or an entry from an older cache layout without 'id'): seed it.
        if app_detail[Const.NAME] not in preRunningTime or 'id' not in preRunningTime[app_detail[Const.NAME]]:
            preRunningTime[app_detail[Const.NAME]] = {
                "start": app_detail[Const.START_TIME],
                "end": 0,
                "id": app_detail[Const.ID]
            }

        running.add(app_detail[Const.NAME])
        if preRunningTime[app_detail[Const.NAME]]['id'] != app_detail[Const.ID] and \
           app_detail[Const.NAME] in SimpleMonitorConfig.BatchLoseJobMaxTime:
            # ID changed: the tracked run was replaced.  'lose' is the time
            # since the recorded 'start'; note 'start' is reset to the scan
            # time when the app leaves the snapshot (bottom loop), so this
            # effectively measures how long the app was away.
            job_id = preRunningTime[app_detail[Const.NAME]]['id'].split("_")[-1]
            lose = currentTime - preRunningTime[app_detail[Const.NAME]]['start']
            preRunningTime[app_detail[Const.NAME]]['id'] = app_detail[Const.ID]
            preRunningTime[app_detail[Const.NAME]]['end'] = currentTime
            preRunningTime[app_detail[Const.NAME]]['start'] = app_detail[Const.START_TIME]
            if lose > SimpleMonitorConfig.BatchLoseJobMaxTime[app_detail[Const.NAME]] and \
               app_detail[Const.ID] not in readyWrong:
                error_msg.append(u"{N}任务{ID}已经丢失: {H}h,{M}m,{S}s".format(
                    N=app_detail[Const.NAME],
                    ID=job_id,
                    H=int(lose / 3600),
                    M=int((lose % 3600) / 60),
                    S=int(lose % 60)
                ))
                readyWrong.add(app_detail[Const.ID])
        else:
            pass  # Same ID: the job is simply still running.

    # Apps gone from the snapshot: record the moment they disappeared.
    finish = preRunningTime.keys() - running
    for app_name in finish:
        preRunningTime[app_name]['start'] = currentTime

    return {
        'ret': error_msg,
        'cache': preRunningTime,
        "readyWrong": readyWrong
    }


@CacheMemroy.cacheByPickle({"cache": ('./Cache/CheckRunningRRM', dict)})
def check_running_rrm(**kwargs):
    """Alarm when the wis_2_minute_rrm Spark job's newest job id stops advancing.

    Scrapes the Spark UI through each configured RM proxy domain, extracts the
    numeric job ids from the jobs table, and alarms once the highest id has
    been unchanged for 10 minutes or more.

    cache maps job id -> first time it was seen as the maximum.
    """
    apps = kwargs['task_list']['apps']
    error = list()
    if 'wis_2_minute_rrm.root.wis_2_batch' in apps:
        appid = apps['wis_2_minute_rrm.root.wis_2_batch'][0]
        for domain in SimpleMonitorConfig.Domain:
            req = requests.get(f"http://{domain}/proxy/{appid}/")
            if len(req.content) < 500:
                # Tiny response = proxy placeholder page; try the next domain.
                continue
            allID = [int(x) for x in re.findall(r'<tr id="job-(\d+)">', req.content.decode())]
            if not allID:
                # Fix: a full-sized page with no job rows previously crashed
                # max() with ValueError; nothing to compare, skip this domain.
                continue
            maxID = max(allID)
            try:
                oldMaxId = max(kwargs['cache'].keys())
            except ValueError:  # narrowed from bare except: empty cache on first run
                oldMaxId = -1
            kwargs['cache'].setdefault(maxID, int(time.time()))
            if oldMaxId == maxID and time.time() - kwargs['cache'][oldMaxId] >= 600:
                m = round((time.time() - kwargs['cache'][oldMaxId])/60, 1)
                error.append(f"rrm已经{m}min没有前进了.")
            elif maxID < oldMaxId:
                # Ids restarted from a lower number -> the application was
                # restarted; reset the cache to track the new sequence.
                kwargs['cache'] = {maxID: int(time.time())}

    return {
        'ret': error,
        'cache': kwargs['cache'],
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/SparkJobsFailedJobs", dict)})
def SparkJobsFailedJobs(**kwargs):
    """Alarm when a non-streaming app's Spark UI page shows 'Failed Jobs'.

    For each app, every configured RM proxy domain is tried; responses shorter
    than 500 bytes are treated as proxy placeholder pages and skipped.
    The cache is passed through unchanged.
    """
    error = list()
    for appName in kwargs['task_list']['apps']:
        appId = kwargs['task_list']['apps'][appName][0]

        if 'streaming' in appName:
            continue  # streaming apps are covered by the streaming checks
        for domain in SimpleMonitorConfig.Domain:
            try:
                req = requests.get(f"http://{domain}/proxy/{appId}/")
                if len(req.content) < 500:
                    continue
            except requests.RequestException:
                # Narrowed from a bare except: only network/HTTP failures are
                # expected here; any other error should surface.
                continue
            if 'Failed Jobs' in req.content.decode():
                error.append(f'{appId}:出现Failed Jobs')

    return {
        'ret': error,
        'cache': kwargs['cache'],
    }


def handle_running(**kwargs):
    """Load today's running snapshot and run every running-state check on it,
    collecting all alarm messages into one list."""
    date_parts = options.filename.split('-')
    snapshot = CacheMemroy.getByJsonLoad(f'{SimpleMonitorConfig.DataDirect}/running/{"/".join(date_parts[0:4])}/{options.filename}')

    if not len(snapshot):
        return []  # no data collected yet for this filename
    lt_tasks = snapshot[0]

    error_msg = finishJob(task_list=lt_tasks)
    assert isinstance(error_msg, list)
    for check in (PreRunningTime,
                  check_running_calc_time,
                  check_running_streaming,
                  check_running_rrm,
                  SparkJobsFailedJobs):
        error_msg.extend(check(task_list=lt_tasks))
    return error_msg


@CacheMemroy.cacheByPickle({"cache": ("./Cache/FailedWrong", set)})
def check_failed(**kwargs):
    """Alarm once for every newly-seen FAILED Spark application.

    kwargs:
        task_list: {'apps': [(job_id, job_name, job_queue), ...]}
        cache: set of job ids already reported (pickle-cache injected).
    Returns {'ret': [messages], 'cache': updated id set}.
    """
    error_msg = list()

    ready_wrongs = kwargs['cache']

    for job_id, job_name, job_queue in kwargs['task_list']['apps']:
        # ready_wrongs is already a set; the original rebuilt set(ready_wrongs)
        # on every iteration, an O(n) copy per membership test.
        if job_id not in ready_wrongs:
            ready_wrongs.add(job_id)
            error_msg.append(u"⁉️ Spark.Failed:{NAME}.{ID}".format(
                NAME=".".join((job_name, job_queue)),
                ID=job_id
            ))

    return {
        "ret": error_msg,
        "cache": ready_wrongs
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/KilledWrong", set)})
def check_killed(**kwargs):
    """Record the ids of KILLED Spark applications without alarming.

    The persisted id set (./Cache/KilledWrong) is read back by zabbix_running
    to exclude killed jobs from run-time statistics.  Always returns an empty
    message list.
    """
    ready_wrongs = kwargs['cache']

    # set.add already handles duplicates; the original's per-iteration
    # `set(ready_wrongs)` copy and membership test were redundant.
    for job_id, job_name, job_queue in kwargs['task_list']['apps']:
        ready_wrongs.add(job_id)

    return {
        "ret": [],
        "cache": ready_wrongs
    }


def handle_failed(**kwargs):
    """Load today's failed and killed snapshots and run the matching checks."""
    date_parts = options.filename.split('-')
    day_dir = "/".join(date_parts[0:4])

    failed = CacheMemroy.getByJsonLoad(f'{SimpleMonitorConfig.DataDirect}/failed/{day_dir}/{options.filename}')
    error_msg = check_failed(task_list=failed[0]) if len(failed) else list()

    killed = CacheMemroy.getByJsonLoad(f'{SimpleMonitorConfig.DataDirect}/killed/{day_dir}/{options.filename}')
    if len(killed):
        error_msg.extend(check_killed(task_list=killed[0]))
    return error_msg


@CacheMemroy.getByPickle({
    'kills': './Cache/KilledWrong',
    'fails': './Cache/FailedWrong'
})
def zabbix_running(**kwargs):
    """Export per-app elapsed running time (minutes) to zabbix output files.

    Per-app state is kept in ./Cache/zabbix/running/<app_name> as JSON:
    {'Running': [job_id, start_ts]} while a run is tracked, {'Running': []}
    when idle.  kwargs['kills'] / kwargs['fails'] are the killed/failed job-id
    sets loaded from pickle by the decorator.
    """
    if not SimpleMonitorConfig.ZabbixOutPath.get('running'):
        return []  # zabbix export disabled in config

    file_dates = options.filename.split('-')
    tasks = CacheMemroy.getByJsonLoad(f'{SimpleMonitorConfig.DataDirect}/running/{"/".join(file_dates[0:4])}/{options.filename}')[0]['apps']
    for path in glob("./Cache/zabbix/running/*"):
        app_name = path[path.rfind("/")+1:]
        try:
            app_cache = CacheMemroy.getByJsonLoad(path)
        except:
            # Corrupt/unreadable state file: restart from an idle record.
            app_cache = {"Running":[]}

        if app_name not in tasks:
            # App vanished from the snapshot: flush its total minutes, go idle.
            if app_cache['Running']:
                os.system('echo {MSG} > {FILE_PATH}'.format(
                    MSG=int((int(time.time())-app_cache['Running'][1])/60),
                    FILE_PATH="/".join([SimpleMonitorConfig.ZabbixOutPath['running'], app_name])
                ))
                #app_cache['Finish'].insert(0, (app_cache['Running'][0], int(time.time())-app_cache['Running'][1]))
                app_cache['Running'] = []
        else:
            new_app = tasks[app_name]
            if not app_cache['Running']:
                # Start tracking the run we just discovered.
                app_cache['Running'] = [new_app[Const.ID], int(new_app[Const.START_TIME])]
            elif new_app[Const.ID] != app_cache['Running'][0]:
                # ID changed: the previous run ended and a new one started.
                # Killed/failed runs count as zero running time.
                if app_cache['Running'][0] not in kwargs['kills'] and app_cache['Running'][0] not in kwargs['fails']:
                    running_time = int(time.time())-app_cache['Running'][1]
                else:
                    running_time = 0
                # NOTE(review): running_time is computed but never used below --
                # the echoed MSG is based on the *new* run's start time.  Confirm
                # whether the commented-out 'Finish' bookkeeping was its consumer.
                #app_cache['Finish'].insert(0, (app_cache['Running'][0], running_time))
                app_cache['Running'] = [new_app[Const.ID], int(new_app[Const.START_TIME])]
                os.system('echo {MSG} > {FILE_PATH}'.format(
                    MSG=int((int(time.time())-app_cache['Running'][1])/60),
                    FILE_PATH="/".join([SimpleMonitorConfig.ZabbixOutPath['running'], app_name])
                ))

        CacheMemroy.setByJsonDump(path, app_cache)


def zabbix_streaming(**kwargs):
    """Write each streaming app's most recent batch run time to its zabbix file."""
    if not SimpleMonitorConfig.ZabbixOutPath.get('streaming'):
        return []  # zabbix export disabled in config

    date_parts = options.filename.split('-')
    snapshot_path = f'{SimpleMonitorConfig.DataDirect}/streaming/{"/".join(date_parts[0:4])}/{options.filename}'
    for item in CacheMemroy.getByJsonLoad(snapshot_path):
        try:
            # Column 5 of the newest completed-batch tuple is its run time.
            run_time = item['completed'][0][5]
        except (Exception, ):
            run_time = 0  # no completed batches yet
        os.system('echo {MSG} > {FILE_PATH}'.format(
            MSG=run_time,
            FILE_PATH="/".join([SimpleMonitorConfig.ZabbixOutPath['streaming'], item['app_name']])
        ))
    return []


@CacheMemroy.cacheByPickle({
    "cache": ("./Cache/MongoReplication", dict),
    "mongo": ("./Cache/MongoPortLife", set),
    "raid": ("./Cache/MongoRaid", dict),
    "port": ("./Cache/MongoPort", dict),
    "log": ("./Cache/Mongolog", dict),
})
def mongo_replication(**kwargs):
    """Health-check the mongo cluster: replication lag, RAID state, processes, logs.

    All probes run concurrently in threads and append to the shared `error`
    list (list.append is atomic under the GIL, so no extra locking is used).

    Pickle-injected caches:
        cache: {ip: last observed replication lag in seconds}
        mongo: set of (ip, process) pairs currently alarmed as missing
        raid:  {ip: consecutive scans with a degraded RAID}
        port:  {"ip.process": consecutive scans the process was missing}
        log:   {"ip:pid": {'status': bool, 'time': datetime}} from the remote log
    """
    mongoWrong = kwargs['cache']
    mongoPortLife = kwargs['mongo']
    mongoRaid = kwargs['raid']
    mongoPort = kwargs['port']
    mongoLog = kwargs['log']

    error = []

    process_lst = list()

    def _get_replication(_ip):
        # Ask the mongo shell for slave replication lag (seconds behind master).
        cmd = f'/bin/echo "db.printSlaveReplicationInfo()" | /usr/local/mongodb3.4.1/bin/mongo {_ip}  | grep secs | cut -d " " -f1 | cut -f2'
        process = os.popen(cmd)
        output = process.read()
        process.close()
        try:
            int(output)
        except:
            return  # unparsable output (host down / no 'secs' line): skip silently
        mongoWrong.setdefault(_ip, int(output))

        if int(output) > SimpleMonitorConfig.MongoMonitorSetting['Replication']:
            # Alarm on crossing into "lagging", or when the lag grew by more
            # than a full threshold since the previous scan.
            if mongoWrong[_ip] < SimpleMonitorConfig.MongoMonitorSetting['Replication'] or \
               (int(output)-mongoWrong[_ip]) > SimpleMonitorConfig.MongoMonitorSetting['Replication']:
                error.append(f"❓mongo.{_ip.split('.')[-1]}.同步时长: {output}")
        else:
            # Back under the threshold after being above it: recovery message.
            if mongoWrong[_ip] > SimpleMonitorConfig.MongoMonitorSetting['Replication']:
                error.append(f"✅mongo.{_ip.split('.')[-1]}.同步时长: {output}")
        mongoWrong[_ip] = int(output)

    for ip in SimpleMonitorConfig.MongoServer:
        process_lst.append(Thread(target=_get_replication, args=(ip, )))

    # Expected mongod/mongos process names per cluster host.
    cmds = {
        "172.18.233.160": [
            "shard1_26001",
            "mongos_26000"
        ],
        "172.18.233.161": [
            "shard1_26001",
            "mongos_26000",
            "shard7_26007"
        ],
        "172.18.233.162": [
            "shard2_26002",
            "config_26100",
            "mongos_26000"
        ],
        "172.18.233.163": [
            "shard2_26002",
            "mongos_26000",
            "shard1_26001"
        ],
        "172.18.233.164": [
            "config_26100",
            "shard3_26003",
            "mongos_26000"
        ],
        "172.18.233.165": [
            "shard3_26003",
            "mongos_26000",
            "shard2_26002"
        ],
        "172.18.233.166": [
            "config_26100",
            "shard4_26004",
            "mongos_26000"
        ],
        "172.18.233.167": [
            "shard4_26004",
            "mongos_26000",
            "shard3_26003"
        ],
        "172.18.233.168": [
            "shard5_26005",
            "mongos_26000",
        ],
        "172.18.233.169": [
            "shard4_26004",
            "mongos_26000",
            "shard5_26005"
        ],
        "172.18.233.170": [
            "shard6_26006",
            "mongos_26000",
        ],
        "172.18.233.171": [
            "shard6_26006",
            "mongos_26000",
            "shard5_26005"
        ],
        "172.18.233.172": [
            "mongos_26000",
            "shard7_26007"
        ],
        "172.18.233.173": [
            "shard6_26006",
            "mongos_26000",
            "shard7_26007"
        ],
        "172.18.233.174": [
            "mongos_26000",
            "shard8_26008"
        ],
        "172.18.233.175": [
            "mongos_26000",
            "shard8_26008"
        ],
        "172.18.233.176": [
            "mongos_26000",
            "shard9_26009"
        ],
        "172.18.233.177": [
            "mongos_26000",
            "shard9_26009"
        ]
    }

    def _get_mongo_data(_ip):
        # RAID check: count drives reporting 'Online' firmware state; fewer
        # than 8 for three consecutive scans raises an alarm.
        p = os.popen(f"""ssh {_ip} "/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aALL | grep 'Firmware state'" """)
        ret = sum(['Online' in i for i in p.read().split('\n')])
        if ret < 8:
            mongoRaid.setdefault(_ip, 0)
            mongoRaid[_ip] += 1
            if mongoRaid[_ip] >= 3:
                error.append(f"❌ {_ip}.Raid:{ret}")
        else:
            mongoRaid[_ip] = 0
        p.close()

        # Process check: each expected mongo process must appear in ps output.
        for cmd in cmds[_ip]:
            p = os.popen(f"ssh {_ip} 'ps -ef | grep {cmd}.conf | grep -v grep'")
            ret = p.read().strip()
            p.close()

            # If the connection failed, retry once.
            if not ret:
                p = os.popen(f"ssh {_ip} 'ps -ef | grep {cmd}.conf | grep -v grep'")
                ret = p.read().strip()
                p.close()

            mongoPort.setdefault(f"{_ip}.{cmd}", 0)
            if cmd not in ret:
                # Missing: alarm after 3 consecutive misses, then remember the
                # (ip, cmd) in mongoPortLife so each outage alarms only once.
                mongoPort[f"{_ip}.{cmd}"] += 1
                if (_ip, cmd) not in mongoPortLife and mongoPort[f"{_ip}.{cmd}"] >= 3:
                    error.append(f"{_ip.split('.')[-1]}.{cmd}: 没法获取进程信息")
                    mongoPort[f"{_ip}.{cmd}"] = 0
                    mongoPortLife.add((_ip, cmd))
            else:
                # Seen again after an alarm: send recovery and clear state.
                if (_ip, cmd) in mongoPortLife:
                    error.append(f"✅ Mongo:{_ip.split('.')[-1]}.{cmd}")
                    mongoPortLife.remove((_ip, cmd))
                    mongoPort[f"{_ip}.{cmd}"] = 0

    def _check_log(_ip):
        # Scan the tail of the host's own monitor log for ERROR / "check" lines.
        _n = datetime.datetime.now()
        p = os.popen(f"""ssh {_ip} "tail -n 20 /data/mongodb_26000/monitor/monitor_mongo.log" """)
        text = p.read()
        rows = text.split('\n')
        p.close()
        for row in rows:
            if 'ERROR' in row:
                # Log line format: "[YYYY-mm-dd HH:MM:SS] ... <pid> ..." with
                # the pid as the 6th space-separated token.
                t = datetime.datetime.strptime(re.findall(r'\[(.*?)\]', row)[0], '%Y-%m-%d %H:%M:%S')
                pid = row.split(' ')[5]
                d_key = f"{_ip}:{pid}"
                # NOTE(review): the very first ERROR for a pid seeds the entry
                # with status=False, so the alarm below is skipped; only an
                # ERROR *after* a recovery (status=True) alarms.  Confirm this
                # is the intended behavior.
                mongoLog.setdefault(d_key, {'status': False, 'time': t})
                if mongoLog[d_key].get('status', False):
                    mongoLog[d_key]['status'] = False
                    mongoLog[d_key]['time'] = t
                    error.append(f"{d_key}: 日志中检测到进程丢失")

            elif "check" in row:
                # "check ... OK" lines list healthy pids as "pid(...)" tokens.
                t = datetime.datetime.strptime(re.findall(r'\[(.*?)\]', row)[0], '%Y-%m-%d %H:%M:%S')
                pids = list(map(
                    lambda x: re.findall(r'(.*?)\(', x)[0], 
                    re.findall(r'check(.*?)OK', row)[0].strip().split(' ')))

                for pid in pids:
                    d_key = f"{_ip}:{pid}"
                    mongoLog.setdefault(d_key, {'status': True, 'time': t})
                    # A recovery only counts when newer than the last ERROR.
                    if not mongoLog[d_key]['status'] and t > mongoLog[d_key]['time']:
                        mongoLog[d_key]['status'] = True
                        mongoLog[d_key]['time'] = t
                        error.append(f"{d_key}: 日志中检测到进程恢复正常")

    for ip in cmds:
        process_lst.append(Thread(target=_get_mongo_data, args=(ip, )))
        process_lst.append(Thread(target=_check_log, args=(ip, )))

    # Fan out every probe thread, then wait for all of them to finish.
    for p in process_lst:
        p.start()

    for p in process_lst:
        p.join()

    return {
        "ret": error,
        "cache": mongoWrong,
        "raid": mongoRaid,
        "port": mongoPort,
        "log": mongoLog,
        "mongo": mongoPortLife
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/QuotaMonitor", dict)})
def QuotaMonitor(**kwargs):
    """Restart the scmonitor suite when a script listed in run.sh is not running.

    Compares the .py scripts launched by run.sh (its nohup lines) against the
    live python processes; if any expected script is missing, run.sh is
    restarted via stop/start.  Never produces alarm messages ('ret' is []).
    """
    def _py_scripts(listing):
        # Extract the '*.py' tokens from a command listing, one token per
        # space-separated word, trimmed at any embedded newline.
        return {tok.split('\n')[0] for tok in listing.split(' ') if ".py" in tok}

    # Live python processes.
    p = os.popen('ps aux | grep ".*python [a-zA-Z]"')
    ps_content = p.read()
    p.close()

    # Scripts that run.sh is supposed to have launched.
    p = os.popen('cat /root/zkw/project/scmonitor/run.sh | grep nohup')
    py_content = p.read()
    p.close()

    # Any expected script without a live process -> restart everything.
    if _py_scripts(py_content) - _py_scripts(ps_content):
        p = os.popen('/bin/bash /root/zkw/project/scmonitor/run.sh stop;/bin/bash /root/zkw/project/scmonitor/run.sh start')
        p.read()
        p.close()

    return {
        "ret": [],
        "cache": {},
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/QuotaMonitor", dict)})
def StreamingMonitor(**kwargs):
    """Check that StreamingAppMonitor.jar is running on the remote host.

    NOTE(review): the pickle path './Cache/QuotaMonitor' is shared with
    QuotaMonitor and ZabbixMonitor -- likely a copy-paste leftover; harmless
    here since the cache is unused and always returned as {}.
    """
    error = []

    # ip = '172.18.33.152' 
    ip = '172.18.233.100' 
    cmd = "ps -ef |grep StreamingAppMonitor.jar | grep -v grep"
    p = os.popen(f'ssh {ip} "{cmd}"')
    ps_content = p.read()
    # A live process yields at least one row plus the trailing newline, i.e.
    # split('\n') length >= 2; fewer means the monitor is not running.
    ps_count = len(ps_content.split('\n'))
    p.close()

    # NOTE(review): when the process list is empty, 'block' is also absent
    # from ps_content, so the first branch always fires and the second looks
    # unreachable whenever ps_count < 2 -- confirm whether these two messages
    # (Block vs. plain) are swapped.
    if ps_count < 2 and 'block' not in ps_content:
        error.append(f"{ip}的StreamingAppMonitor.Block停止")
    elif ps_count < 2:
        error.append(f"{ip}的StreamingAppMonitor停止")

    return {
        "ret": error,
        "cache": {}
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/QuotaMonitor", dict)})
def ZabbixMonitor(**kwargs):
    """Verify that the four expected zabbix docker containers are running.

    Parses `docker ps | grep zabbix`; the container name is the last
    space-separated token of each row.  Alarms with the set of missing
    component names.  The cache is unused (always returned as {}).
    """
    error = []

    p = os.popen("docker ps | grep zabbix")
    ps_content = p.read()
    p.close()

    # Collect the final column (container name) of each output row.
    # (Removed the unused ps_count line-count from the original.)
    server = set()
    for row in ps_content.split('\n'):
        server.add(row.split(' ')[-1])

    required = {"zabbix-web-nginx-mysql", "zabbix-server-mysql", "zabbix-mysql-server", "zabbix-java-gateway"}
    lose = required - server
    if lose:
        error.append(f'233.Zabbix的组件缺失:{lose}')

    return {
        "ret": error,
        "cache": {}
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/OnlineSave", dict)})
def OnlineSave(**kwargs):
    """Check that the offline-save spark job (kcs.jar) is alive on 172.18.233.3;
    alarm only after five consecutive misses (cache['spark'] is the miss counter)."""
    cache = kwargs['cache']
    error = []

    proc = os.popen('ssh 172.18.233.3 "ps aux | grep kcs.jar"')
    listing = proc.read()
    proc.close()

    if "spark" in listing:
        # Job is present: reset the consecutive-miss counter.
        cache['spark'] = 0
    else:
        cache.setdefault('spark', 0)
        cache['spark'] += 1
        if cache['spark'] >= 5:
            error.append(f'离线数据存储任务丢失, 自动启动尝试失败')

    return {
        "ret": error,
        "cache": cache
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/SDNNetworkMonitor", dict)})
def SDNNetworkMonitor(**kwargs):
    """Probe SDN hosts via ssh+ifconfig; alarm on loss, report recovery with duration.

    cache maps ip -> timestamp when the host was first seen down.

    Bug fixed: the original `elif kwargs['cache'].get(ip)` also matched when
    the host was STILL down but already cached (first condition false because
    ip was in the cache), emitting a false "connected" message, clearing the
    record, and re-alarming on the next scan -- endless flapping.  Recovery
    now requires the ip to actually appear in the ifconfig output.
    """
    error = []
    for ip in ['172.31.159.11', '172.31.159.12', '172.31.159.13', '172.31.159.14', '172.31.159.18']:
        p = os.popen(f'ssh  {ip} "ifconfig | grep inet"')
        retStr = p.read()
        p.close()

        if ip not in retStr:
            # Host down: record first-seen-down time and alarm once; while it
            # stays down with a record present, keep the original timestamp.
            if ip not in kwargs['cache']:
                kwargs['cache'][ip] = int(time.time())
                error.append(f"SDN:{ip} lose...")
        elif kwargs['cache'].get(ip):
            # Host is back: report how long it was away, in the largest
            # sensible unit, and clear the record.
            oldTime = kwargs['cache'].pop(ip)
            loseTime = int(time.time()) - oldTime

            if loseTime >= 3600:
                error.append(f"SDN:{ip} connected, lose {loseTime/3600.0}h.")
            elif loseTime > 60:
                error.append(f"SDN:{ip} connected, lose {loseTime/60.0}m.")
            else:
                error.append(f"SDN:{ip} connected, lose {loseTime}s.")

    return {
        "ret": error,
        "cache": kwargs['cache']
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/SdnSparkWeb", dict)})
def SdnSparkWeb(**kwargs):
    """Monitor the SDN spark-web host's python3 services with exponential back-off.

    cache maps a process keyword -> [countdown, interval].  Each scan the
    process is missing decrements countdown; when it hits 0 an alarm fires,
    the interval doubles and the countdown is reset to it, so repeat alarms
    get progressively rarer.  When the process reappears its entry is dropped
    and a recovery message is emitted.  A fresh alarm also triggers a deeper
    nginx/postgres root-cause check.
    """
    error = []

    p = os.popen('ssh 172.31.159.18 "ps aux | grep python3 | grep -v grep"')
    ps_content = p.read()
    p.close()

    need_deep_analy = False

    for field in ['uwsgi', 'SparkInfoCollect']:
        if field not in ps_content:
            # Seed with countdown=3, interval=1 on the first miss.
            kwargs['cache'].setdefault(field, [3, 1])
            kwargs['cache'][field][0] -= 1
            if kwargs['cache'][field][0] == 0:
                error.append(f'SDN支持：进程{field}丢失.')
                need_deep_analy = True
                # Back-off: double the interval before the next repeat alarm.
                kwargs['cache'][field][1] *= 2
                kwargs['cache'][field][0] = kwargs['cache'][field][1]
        elif field in  kwargs['cache']:
            # Process is back and was being tracked: recovery + reset.
            error.append(f'SDN支持：进程{field}恢复正常.')
            kwargs['cache'].pop(field) 

    if need_deep_analy:
        # Root-cause probes: nginx service state and the spark_web postgres DB.
        for cmd, str_ok, msg in [
            ('ssh 172.31.159.18 "service nginx status"', 'running', 'nginx未启动'),
            ('ssh 172.31.159.18 "ps aux | grep postgres | grep -v grep"', 'postgres spark_web', 'postgres未启动'),
        ]:
            p = os.popen(cmd)
            ps_content = p.read()
            p.close()

            if str_ok not in ps_content:
                error.append('·'+msg)

    return {
        "ret": error,
        "cache": kwargs['cache']
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/MongoByWis", dict)})
def MongoByWis(**kwargs):
    """Validate the nightly Mongo maintenance logs on 172.18.233.160.

    For each check, runs a remote grep/wc over today's log and compares the
    count against the expected value; a mismatch (or an unparsable count) is
    reported.  Returns {"ret": messages, "cache": unchanged state}.

    Bugs fixed: the original converted `ret` with int() inside the `if`
    condition, OUTSIDE the try/except meant to catch the conversion failure,
    so a non-numeric reply crashed the whole check; it also ran an ssh
    "ps aux | grep kcs.jar" whose output was never used (dead code, removed).
    """
    error = []

    for cmd, expected, errMsg in [
        ("""cat /home/mongod/db/log/shardWisByDay_$(date +\%Y\%m\%d).log|grep " ok " |wc -l""", 70, "shardWisByDay异常"),
        ("""cat /home/mongod/db/log/indexWisByDay_$(date +\%Y\%m\%d).log|grep "***error***" |wc -l""", 0, "indexWisByDay异常"),
        ("""cat /home/mongod/db/log/cluster_ShardDistributions_$(date +\%Y\%m\%d).log |wc -l""", 3558, "ShardDistributions异常"),
    ]:
        p = os.popen(f"ssh 172.18.233.160 '{cmd}'")
        ret = p.read().strip()
        p.close()

        if not ret:
            # Empty reply (e.g. ssh failure): skip, matching original behavior.
            continue
        try:
            actual = int(ret)
        except ValueError:
            error.append(f"{errMsg}(返回数据无法被转换成int: '{ret}')")
            continue
        if actual != expected:
            # Report the delta between expected and observed counts.
            error.append(f"{errMsg}({expected - actual})")

    return {
        "ret": error,
        "cache": kwargs['cache']
    }


@CacheMemroy.cacheByPickle({"cache": ("./Cache/WisRaidOnline", dict)})
def WisRaidOnline(**kwargs):
    """Detect dropped disks in the wis RAID enclosures on 172.18.233.3.

    Parses the SATA rows of `storcli64 /c0 show` and `/c1 show`.  cache maps
    a drive id -> [next_alert_epoch, remaining_alert_count], rate-limiting
    alerts (first after a 10s grace, then every 300s) and capping their total
    so a dead drive does not page forever.  Recovery is reported once when a
    drive returns to "Onln".  Returns {"ret": messages, "cache": state}.
    """
    error = []
    
    # NOTE(review): row layout assumed to be "<id> <col2> <state> ..." with
    # the drive state in the third non-empty column — confirm against storcli.
    p = os.popen('ssh 172.18.233.3 "/opt/MegaRAID/storcli/storcli64 /c0 show | grep SATA"')
    # p = os.popen('ssh 172.18.233.3 "cat /tmp/wis_raid"')
    raid = p.read().strip().split('\n')
    p.close()
    
    p = os.popen('ssh 172.18.233.3 "/opt/MegaRAID/storcli/storcli64 /c1 show | grep SATA"')
    raid.extend(p.read().strip().split('\n'))
    p.close()

    # Fewer than two non-"Onln" rows => at most one degraded drive right now.
    one_raid = len(list(filter(lambda x: "Onln" not in x, raid))) < 2

    error_raid = []
    ok_raid = []
    now_time = int(time.time())
    for row in raid:
        # Collapse runs of spaces: keep only the non-empty columns.
        data = list(filter(lambda x: x, row.split(' ')))
        if "Onln" != data[2]:
            if one_raid:
                # Single failure: allow 1 alert, first after a 10s grace period.
                kwargs['cache'].setdefault(data[0], [now_time+10, 1])
            else:
                # Multiple failures: allow 3 alerts per drive and top every
                # already-tracked drive back up to 3 (escalation).
                kwargs['cache'].setdefault(data[0], [now_time+10, 3])
                for name in kwargs['cache']:
                    kwargs['cache'][name][1] = 3

            # Fire only when the rate-limit window elapsed and alerts remain.
            if kwargs['cache'][data[0]][0] <= now_time and kwargs['cache'][data[0]][1] > 0:
                error_raid.append(f"{data[0]} {data[2]}")
                kwargs['cache'][data[0]][0] = now_time+300  # next window: +300s
                kwargs['cache'][data[0]][1] -= 1

        elif data[0] in kwargs['cache']:
            # Drive back online: stop tracking and announce recovery once.
            kwargs['cache'].pop(data[0])
            ok_raid.append(f"{data[0]}")

    if error_raid:
        error.append("wis磁盘柜掉盘:\n"+"\n".join(error_raid))

    if ok_raid:
        error.append("wis磁盘柜恢复:\n"+"、".join(ok_raid))
    return {
        "ret": error,
        "cache": kwargs['cache']
    }


@CacheMemroy.cacheByPickle({
    "cache": ("./Cache/OnlineServer", set)
})
def OnlineMonitor(**kwargs):
    """Ping every configured server once and alert on connectivity changes.

    cache is the set of ips currently considered offline; a message is sent
    only on a transition (newly lost, or newly recovered), never repeated.
    Returns {"ret": messages, "cache": state}.

    Bug fixed: the original never closed the `os.popen` pipe, leaking a file
    descriptor (and a zombie child) per ping.
    """
    err = []
    cache = kwargs['cache']
    for group, iplist in SimpleMonitorConfig.OnlineServer.items():
        for ip in iplist:
            p = os.popen(f"ping {ip} -c 1")
            output = p.read()
            p.close()  # was leaked in the original

            isOnline = "0 packets received" not in output
            if not isOnline and ip not in cache:
                err.append(f"{group}: {ip}失联。")
                cache.add(ip)
            elif isOnline and ip in cache:
                err.append(f"{group}: {ip}恢复连接。")
                cache.remove(ip)

    return {"ret": err, "cache": cache}


@CacheMemroy.cacheByPickle({
    "cache": ("./Cache/CdhQueryMonitor", set)
})
def CdhQueryMonitor(**kwargs):
    """Evaluate CDH timeseries queries against configured min/max thresholds.

    Probes the primary CDH host and falls back to the secondary if it is
    unreachable.  For 'rate' queries the latest sample is expressed as a
    percentage of the average of all earlier samples; otherwise the latest
    raw value is used.  cache holds the names currently in alarm so each
    threshold breach / recovery is reported exactly once.
    Returns {"ret": messages, "cache": state}.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt) is
    narrowed to requests.RequestException; rate computation no longer
    divides by zero on short or all-zero history; recovery now uses
    inclusive bounds so a value landing exactly on a threshold clears.
    """
    err = list()
    cache = kwargs['cache']
    host = f"http://{SimpleMonitorConfig.CDH[0]}"
    try:
        requests.get(host)
    except requests.RequestException:  # primary unreachable -> use secondary
        host = f"http://{SimpleMonitorConfig.CDH[1]}"

    auth = HTTPBasicAuth(SimpleMonitorConfig.CdhUser, SimpleMonitorConfig.CdhPassword)
    header = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    for name, data in SimpleMonitorConfig.CdhQuerys.items():
        url = f"{host}/api/v11/timeseries?query={data['query'].replace(' ', '%20')}"
        res = requests.get(url, headers=header, auth=auth).json()
        points = res['items'][0]['timeSeries'][0]['data']
        if data['type'] == 'rate':
            history = points[:-1]
            if not history:
                continue  # not enough samples to form a baseline
            avg = sum(point['value'] for point in history) / len(history)
            if avg == 0:
                continue  # avoid ZeroDivisionError on an all-zero baseline
            value = points[-1]['value'] / avg * 100
        else:
            value = points[-1]['value']

        value = round(value, 2)
        if value < data['min'] and name not in cache:
            err.append(f"{name}({value}):低于下限({data['min']}).")
            cache.add(name)
        elif value > data['max'] and name not in cache:
            err.append(f"{name}({value}):高于上限({data['max']}).")
            cache.add(name)
        elif data['min'] <= value <= data['max'] and name in cache:
            err.append(f"{name}:恢复正常.")
            cache.remove(name)

    return {"ret": err, "cache": cache}


@CacheMemroy.cacheByPickle({"cache": ("./Cache/CodisServerMonitor", set)})
def CodisServerMonitor(**kwargs):
    """PING every Codis sentinel, proxy, and server via redis-cli, in threads.

    cache is the set of endpoints currently considered down; failure and
    recovery are each reported exactly once per transition.  Sentinels get
    one retry before being declared down.  Returns {"ret": messages,
    "cache": state}.

    Bugs fixed: (1) the server cache keys were the LITERAL string
    "{_ip}:{port}" (missing f-prefix), so every codis-server shared a single
    cache entry and dedup/recovery never worked per-endpoint; (2) in the
    sentinel retry, the port variable `p` had been clobbered by the popen
    object, so the retry command interpolated a closed pipe object instead
    of the port.
    """
    err = []
    cache = kwargs['cache']
    rdsCli = SimpleMonitorConfig.RedisCliPath
    proxys = ["172.18.233.220:29000", "172.18.233.220:39000", "172.18.233.221:29000", 
              "172.18.233.221:39000", "172.18.233.222:29000", "172.18.233.222:39000", 
              "172.18.233.223:39000", "172.18.233.223:29000", "172.18.233.224:29000", 
              "172.18.233.224:39000"]

    def _ping(host, port):
        # One redis-cli PING round-trip; True iff the endpoint answered PONG.
        pipe = os.popen(f"{rdsCli} -h {host} -p {port} ping")
        pong = pipe.read().strip()
        pipe.close()
        return "PONG" in pong

    def _get_sentinel(_sentinel):
        st = time.time()
        host, port = _sentinel.split(':')
        ok = _ping(host, port)
        print('\t', _sentinel, time.time()-st, 's')

        if not ok:
            ok = _ping(host, port)  # one retry, now with the correct port

        if not ok:
            if _sentinel not in cache:
                err.append(f"Codis-Sentinel：{_sentinel} Ping-Pong失败.")
                cache.add(_sentinel)
        else:
            if _sentinel in cache:
                err.append(f"Codis-Sentinel：{_sentinel} Ping-Pong恢复正常.")
                cache.remove(_sentinel)

    process_lst = list()
    for sentinel in ["172.18.233.222:46379", "172.18.233.221:46379", "172.18.233.223:46379", "172.18.233.224:46379", "172.18.233.220:46379"]:
        process_lst.append(Thread(target=_get_sentinel, args=(sentinel, )))

    def _get_proxy(_proxy):
        st = time.time()
        host, port = _proxy.split(':')
        ok = _ping(host, port)
        print('\t', _proxy, time.time()-st, 's')
        if not ok:
            if _proxy not in cache:
                err.append(f"Codis-Proxy：{_proxy} Ping-Pong失败.")
                cache.add(_proxy)
        else:
            if _proxy in cache:
                err.append(f"Codis-Proxy：{_proxy} Ping-Pong恢复正常.")
                cache.remove(_proxy)

    for proxy in proxys:
        process_lst.append(Thread(target=_get_proxy, args=(proxy, )))

    def _get_server(_ip):
        for port in range(26379, 26393):
            st = time.time()
            ok = _ping(_ip, port)
            print('\t', _ip, port, time.time()-st, 's')

            key = f"{_ip}:{port}"  # was the literal "{_ip}:{port}" — f-prefix missing
            if not ok:
                if key not in cache:
                    err.append(f"Codis-Server：{_ip}:{port} Ping-Pong失败.")
                    cache.add(key)
            else:
                if key in cache:
                    err.append(f"Codis-Server：{_ip}:{port} Ping-Pong恢复正常.")
                    cache.remove(key)

    for ip in ["172.18.233.220", "172.18.233.221", "172.18.233.222", "172.18.233.223", "172.18.233.224"]:
        process_lst.append(Thread(target=_get_server, args=(ip, )))

    # Fan out all probes concurrently, then wait for every one to finish.
    for t in process_lst:
        t.start()

    for t in process_lst:
        t.join()

    return {"ret": err, "cache": cache}


def main():
    """Run the checker selected by --mode and push any resulting alerts.

    A JSONDecodeError from a checker is logged and treated as "no alerts";
    any other exception (including an unknown mode) propagates.  When the
    checker returns messages, they are prefixed with a timestamp and sent
    through notice().
    """
    try:
        checkers = {
            "streaming": handle_streaming,
            "running": handle_running,
            "failed": handle_failed,
            "zabbix.running": zabbix_running,
            "zabbix.streaming": zabbix_streaming,
            "mongo.replication": mongo_replication,
            "quota.monitor": QuotaMonitor,
            "streaming.monitor": StreamingMonitor,
            "zabbix.monitor": ZabbixMonitor,
            "online.save": OnlineSave,
            "mongo.wis": MongoByWis,
            "sdn.spark": SdnSparkWeb,
            "wis.raid": WisRaidOnline,
            "sdn.network": SDNNetworkMonitor,
            "codis.monitor": CodisServerMonitor,
            "online.monitor": OnlineMonitor,
            "cdh.query": CdhQueryMonitor,
        }
        messages = checkers[options.mode]()
    except json.decoder.JSONDecodeError as e:
        print("main error =>", options.mode, e)
        messages = list()

    if messages:
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        messages.insert(0, stamp)
        notice("\n".join(messages))

if __name__ == '__main__':
    # Register CLI options table-driven: (short, long, dest, help).
    # All options keep metavar="FILE" as before.
    parser = OptionParser()
    option_specs = [
        ("-f", "--file", "filename", "scrapy out filename that format is json"),
        ("-m", "--mode", "mode", u"检查异常的类型:streaming, running, failed"),
        ("-u", "--user", "user", u"CorpID"),
        ("-p", "--passwd", "passwd", u"Secret"),
        ("-a", "--appid", "appid", u"Appid"),
    ]
    for short_flag, long_flag, dest, help_text in option_specs:
        parser.add_option(short_flag, long_flag, dest=dest,
                          help=help_text, metavar="FILE")
    (options, args) = parser.parse_args()
    main()
