#!/usr/bin/python
# -*- coding:utf-8 -*-
import subprocess
import time

__version__ = '0.1alpha'
__author__ = 'liangkangkang@epic.com'
__license__ = 'xxxx'
__birth__ = '2020-01-09'

from utils.config import ConfigManager, JDBC_CONF_PATH, NODE_CONF_PATH, STATUS_CONF_PATH, get_soc_ip, get_cu_hash, \
    get_cu_ip, KAFKA_CONF_PATH, get_app_conf

from utils import streamsetsrest
from utils.tools import getLogger, md5, run_cmd_by_root, check_build_in
from common import fileStrList, cleanConfigFile, writeConfigToFile
import datetime
import json
import os
import platform
import shutil
import socket
import sys
import threading
import time
import traceback

import psycopg2
import yaml

from utils.orchid import Orchid

# Directory where per-datasource/pipeline metric snapshot files are written
# by streamsets and read back by _getMetricsByPipelineNameAndLogsourceid().
DIRECTORY = "/opt/epic/soc/conf/ds_monitor/"

# hangout (ES output service) configuration file, reloaded when ES is deployed
hangout_conf = "/opt/epic/soc/conf/service/es.yml"

# shared topic definition distributed as a streamsets resource
common_topic = "/opt/epic/soc/conf/streamsets/resources/common_topic.json"

# streamsets data / built-in pipeline definition / resource directories
STREAMSETS_DATA = "/opt/epic/soc/conf/streamsets/data"
INTERNAL_PATH = "/opt/epic/soc/conf/streamsets/internal"
PIPELINECONFIG = "pipelineConfig"
RESOURCE_PATH = "/opt/epic/soc/conf/streamsets/resources/"
# Pipeline label names used to classify pipelines in streamsets.
PARSE = "parse"
ACCESS = "access"
STORE = "store"
CONTROL = "control"
DELETE = "delete"
ES = "es"
PARQUET = "parquet"
PREVIEW = "preview"
UUID = "uuid"
TITLE = "title"
MOVE = "move"

# Config managers for the jdbc, CU-node and status configuration files.
jdbcMgr = ConfigManager(JDBC_CONF_PATH, None)
cuMgr = ConfigManager(NODE_CONF_PATH, None)
statusMgr = ConfigManager(STATUS_CONF_PATH, None)

# Thresholds/intervals for monitor_pipeline (read via read_conf()).
pipeline_monitor_conf = '/opt/epic/soc/majordomo/conf/pipeline_monitor_config.json'
g_logger = getLogger('cuslave')


def get_orchid():
    """Build an Orchid REST client for talking to the platform.

    Slave / built-in nodes talk plain HTTP to the SOC host on port 8091;
    external collectors use signature auth. The CU hash and the access
    token (when present in the status config) are attached as
    appId/accessToken.

    :return: a configured Orchid instance
    """
    soc_ip = get_soc_ip()
    if is_slave() or check_build_in():
        orchid = Orchid(protocol='http', host=soc_ip, port=8091, prefix='')
    else:
        orchid = Orchid(auth_type='signature', host=soc_ip, prefix='')
    status = statusMgr.get_config()
    # dict.has_key() is Python-2-only and long deprecated; `in` is
    # equivalent here and also works on Python 3.
    cu_token = status['token'] if 'token' in status else None
    cu_hash = get_cu_hash()
    orchid.appId = cu_hash
    orchid.accessToken = cu_token
    return orchid


def write_conf(s):
    """Serialize *s* as pretty-printed JSON to the pipeline monitor
    config file, overwriting any previous content.

    :param s: JSON-serializable object
    """
    # `with` already closes the file on exit; the original's explicit
    # f.close() inside the block was redundant.
    with open(pipeline_monitor_conf, 'w') as f:
        f.write(json.dumps(s, indent=4, ensure_ascii=False))


def write_oom_conf(file_name, s):
    """Serialize *s* as pretty-printed JSON to *file_name*, overwriting it.

    :param file_name: destination path
    :param s: JSON-serializable object
    """
    # `with` already closes the file on exit; the original's explicit
    # f.close() inside the block was redundant.
    with open(file_name, 'w') as f:
        f.write(json.dumps(s, indent=4, ensure_ascii=False))

def read_oom_conf(file_name):
    """Read a JSON file, returning {} when it is missing or invalid.

    :param file_name: path to the JSON file
    :return: parsed object, or {} on any read/parse problem
    """
    try:
        with open(file_name, 'r') as f:
            return json.loads(f.read())
    except (IOError, OSError, ValueError):
        # Narrowed from a bare `except:` — a missing file or broken JSON
        # means "no config"; anything else (e.g. KeyboardInterrupt)
        # should propagate.
        return {}

def read_conf():
    """Read the pipeline monitor config file.

    :return: parsed dict, or {} when the file is missing or invalid
        (callers must handle missing keys)
    """
    try:
        with open(pipeline_monitor_conf, 'r') as f:
            return json.loads(f.read())
    except (IOError, OSError, ValueError):
        # Narrowed from a bare `except:` — a missing file or broken JSON
        # means "no config"; anything else should propagate.
        return {}


def is_start_stopped_pipeline(histroy_status):
    """Return True when the history shows the pipeline has been STOPPED
    for more than an hour (so restarting it is reasonable).

    :param histroy_status: list of history entries, each with 'status'
        and a millisecond 'timeStamp'
    :return: bool (the original fell through and returned None when no
        STOPPED entry was old enough)
    """
    current_time = int(time.time()) * 1000  # milliseconds, matching timeStamp
    for status in histroy_status:
        if status.get("status") == "STOPPED":
            elapsed = current_time - int(status.get('timeStamp'))
            # Both operands are milliseconds, so the one-hour threshold is
            # 60*60*1000; the original compared against 3600, i.e. 3.6 s.
            if elapsed > 60 * 60 * 1000:
                return True
    return False


def check_is_oom():
    """Count non-pipeline OutOfMemory lines in the streamsets sdc.log.

    :return: number of matching log lines, or 0 when the log cannot be
        read or the command output is malformed (best effort)
    """
    ret = 0
    try:
        cmd_ret = run_cmd_by_root('cat /opt/epic/soc/log/streamsets/sdc.log  |grep -i OutOfMemory |grep -vi pipeline | wc -l', shell=True)
        ret = int(cmd_ret['result'][0].strip())
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any failure simply means "no OOM observed".
        pass

    return ret


def get_slave_pipelines_dict(pipelines):
    """Index a list of pipeline dicts by pipeline name.

    :param pipelines: iterable of dicts with 'name', 'uuid' and 'status'
    :return: {name: {'uuid': ..., 'status': ...}}
    """
    return {
        item['name']: {'uuid': item['uuid'], 'status': item['status']}
        for item in pipelines
    }


def get_dev_ip():
    """Best-effort detection of this machine's IPv4 address.

    Tries gethostbyname first; if that yields nothing usable (None,
    loopback, or not a dotted quad) falls back to parsing ``ifconfig``
    output, with the grep pattern chosen by the distribution release
    (the 6.5 vs 7.0 ``ifconfig`` output formats differ).

    :return: the detected address string, or None when nothing worked
    """
    def _is_ipv4(addr):
        # The original called isIPv4(), which is not defined or imported
        # anywhere in this module (NameError when reached). inet_aton
        # accepts shorthand forms, so additionally require a full
        # dotted quad.
        try:
            socket.inet_aton(addr)
        except (socket.error, TypeError):
            return False
        return addr.count('.') == 3

    local_ip = None
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
    except socket.error:
        g_logger.error(str(traceback.format_exc()))
    if local_ip is None or local_ip == '127.0.0.1' or not _is_ipv4(local_ip):
        # `platform` was used here without being imported; it is now
        # imported at module level.
        linuxInfo = platform.linux_distribution()
        if linuxInfo[1].startswith("6.5"):
            shellscript = 'ifconfig | grep "inet addr" | grep Bcast | awk -F \' \' \'{print $2}\' | awk -F \':\' \'{print $2}\''
            targetRet = subprocess.Popen(shellscript, stdout=subprocess.PIPE, shell=True)
            local_ip = targetRet.stdout.readline().strip('\n')
        elif linuxInfo[1].startswith("7.0"):
            shellscript = 'ifconfig | grep inet | grep broadcast | awk -F \' \' \'{print $2}\' | awk \'NR==1{print}\''
            targetRet = subprocess.Popen(shellscript, stdout=subprocess.PIPE, shell=True)
            local_ip = targetRet.stdout.readline().strip('\n')
    g_logger.info("local_ip is %s", local_ip)
    return local_ip

def get_node_ip():
    """Return this node's IP from the CU node config ('' when absent)."""
    return cuMgr.get_config().get("ip", "")


def is_master():
    """True when the CU node config declares this node a master."""
    node_type = cuMgr.get_config().get("type", "")
    return node_type == "master"


def is_slave():
    """True when the CU node config declares this node a slave."""
    node_type = cuMgr.get_config().get("type", "")
    return node_type == "slave"

def delete_pipeline(ip):
    """Stop every RUNNING pipeline labeled DELETE on *ip*, then remove all
    DELETE-labeled pipelines there."""
    result = streamsetsrest.getPipelines(DELETE, 'true', ip)
    for entry in result[1]:
        if entry.get("status", "") != "RUNNING":
            continue
        try:
            streamsetsrest.stopPipeline(entry.get("name", ""))
        except Exception:
            # best effort: keep stopping the rest even if one fails
            pass
    streamsetsrest.deleteByFiltering(DELETE, ip)


def control(ip, label):
    """Start pipelines carrying *label* on *ip*.

    EDITED pipelines are always started; STOPPED ones are started only for
    the PREVIEW and PARQUET labels.
    """
    pipelines = streamsetsrest.getPipelines(label, 'true', ip)
    if not pipelines:
        return
    for pipeline in pipelines[1]:
        state = pipeline.get("status", "")
        wants_start = (state == "EDITED"
                       or (state == "STOPPED" and label in (PREVIEW, PARQUET)))
        if wants_start:
            try:
                streamsetsrest.startPipeline(pipeline.get('name'), ip)
                time.sleep(0.01)
            except Exception:
                # best effort: move on to the next pipeline
                pass


def get_hd_hash():
    """Return the hardware hash printed by the hdhash_new binary.

    The binary lives in a different location for built-in deployments.
    Returns '' when the binary cannot be run.
    """
    hd_hash = ''
    try:
        if check_build_in():
            binary = '/opt/epic/soc/conf/service/hdhash_new'
        else:
            binary = '/opt/epic/soc/bin/hdhash_new'
        proc = subprocess.Popen(binary, stdout=subprocess.PIPE)
        hd_hash = proc.stdout.readline().strip('\n')
    except:
        g_logger.error(str(traceback.format_exc()))
    return hd_hash


def send_message_center(status, master):
    '''
    Insert an alert row into tbl_msg_center telling operators that a
    pipeline is stuck in START_ERROR / STOP_ERROR. Example row:

    INSERT INTO "public"."tbl_msg_center" ("uid", "type", "priority", "params", "content", "objid","ctime","status","rtime")
    VALUES ('0', 'MONITOR_WAINING', '3', '', '采集器-10.65.133.7 [日志接入模块] 出现 pipeline 处于 状态，请及时反馈处理!!!', 'system','2019-09-11 11:26:21.317080','unread','2019-09-11 11:26:21.317080')

    :param status: 'START_ERROR' for a start failure; anything else is
        reported as a stop failure
    :param master: True -> read DB credentials from the jdbc config and
        report the SOC host; False -> read them from the app config and
        report the CU ip
    '''
    if master:
        master_ip = jdbcMgr.get_jdbc_config("soc.host")
        user_name = jdbcMgr.get_jdbc_config("jdbc.username")
        password = jdbcMgr.get_jdbc_config("jdbc.password")
        send_ip = master_ip
    else:
        master_ip = get_soc_ip()
        app_conf = get_app_conf()
        psql_conf = app_conf['postgreSQL']
        user_name = psql_conf['user']
        password = psql_conf['pwd']
        send_ip = get_cu_ip()
    current_time = datetime.datetime.now()
    if status == 'START_ERROR':
        content = "采集器-%s [日志接入模块] Pipeline 处于 %s 状态, 请打400及时反馈处理！！！"  %(send_ip, "启动错误")
    else:
        content = "采集器-%s [日志接入模块] Pipeline 处于 %s 状态, 请打400及时反馈处理！！！"  %(send_ip, "停止错误")

    # Parameterized query instead of %-interpolating values into the SQL
    # text: the old form would break (or inject) on any quote character
    # in the content or timestamp representation.
    sql = '''INSERT INTO "public"."tbl_msg_center" ("uid", "type", "priority", "params", "content", "objid","ctime","status","rtime") VALUES ('0', '', '3', '', %s, 'system',%s,'unread',%s)'''
    try:
        conn = psycopg2.connect(database="soc", user=user_name, password=password, host=master_ip, port=5432)
        try:
            cur = conn.cursor()
            cur.execute(sql, (content, current_time, current_time))
            conn.commit()
            cur.close()
        finally:
            # always release the connection, even when execute/commit fails
            conn.close()
    except Exception:
        g_logger.error(str(traceback.format_exc()))


class CuSlave(object):
    """Collector-slave daemon: imports/syncs pipeline configuration from
    the platform and monitors the local streamsets instance.

    (The original header line read ``csocs CuSlave(object):``, a
    SyntaxError; the ``class`` keyword is restored here.)
    """

    def __init__(self, logger):
        self.logger = logger
        # Built-in deployments read kafka host / ES endpoints / collector
        # hash from the jdbc config; external collectors ("outside") read
        # the kafka config file and compute the hardware hash instead.
        self.cuconfig = cuMgr.get_config()
        self.deploy_mode = self.cuconfig.get("deployMode", "LAS")
        # collecter.outside
        if not check_build_in():
            self.outside = True
            kafkaMgr = ConfigManager(KAFKA_CONF_PATH, g_logger)
            self.kafkaHost = kafkaMgr.get_kafka_config('kafka.host')
            self.collecterHash = get_hd_hash()
        else:
            self.outside = False
            self.jdbcconfig = jdbcMgr.get_jdbc_all_config()
            self.kafkaHost = self.jdbcconfig.get("kafka.host", "")
            # es.ipAndPort
            self.ipAndPortStr = self.jdbcconfig.get("es.ipAndPort", "")
            # collecter.hash
            self.collecterHash = self.jdbcconfig.get("collecter.hash", "")
        # (a duplicate `self.logger = logger` assignment was removed here)
        self.init = False                 # _syncInternalConfig runs once
        self.cu_ip = get_cu_ip()
        self.soc_ip = get_soc_ip()
        self.masterMap = {}
        self.configMap = {}               # config name -> md5 of last write
        self.access_log = []
        self.stopES = True
        self.pipeline_name_list = ['info.json', 'pipeline.json', 'rules.json', 'uiinfo.json']
        self.oom_start_time = 0
        # pipelines already reported to the message center (avoid repeats)
        self.start_error_stop_error = []

    def check_soc(self):
        """True when this collector is deployed in LAS mode."""
        return "LAS" == self.deploy_mode

    def scheduled(self):
        """Periodic (60 s) sync loop: import built-in pipelines, pull
        config/pipeline updates from the platform, stop ES pipelines when
        needed, and reload the hangout config on ES+LAS deployments."""
        try:
            self.logger.info("scheduled start")
            self._syncInternalConfig()
            self._syncConfig()
            self._syncPipeline()
            self._stopEsPipeline()
            if self._isEsDeployed() and self.check_soc():
                self._reloadHangoutConf()
        except Exception:
            self.logger.error(traceback.format_exc())
        # reschedule regardless of success
        threading.Timer(60, self.scheduled).start()


    def monitor_access_pipeline(self):
        """Run monitor_port() now, then reschedule itself every 2100 s."""
        try:
            self.monitor_port()
        except Exception:
            self.logger.error(traceback.format_exc())
        # reschedule regardless of success
        threading.Timer(2100, self.monitor_access_pipeline).start()

    def monitor_port(self):
        """Probe the ports of all access-type pipelines and restart
        streamsets when one of them appears dead.

        Scenario: fetch the port list for all access pipelines from the
        platform, "connect" to each on 127.0.0.1, and restart streamsets
        if any connect fails.
        """
        self.logger.info("monitor_port starting")
        rets = []
        orchid = get_orchid()
        url = "/collecter/v1/dataAcessMonitor/ports"
        if orchid.get(url, None):
            rets = orchid.getResult()
        is_exist = True
        for ret in rets:
            # NOTE(review): connect() on a SOCK_DGRAM socket exchanges no
            # packets, so it normally succeeds whether or not anything is
            # listening — this probe likely never detects a dead port.
            # Confirm whether these ports are UDP (syslog) or TCP before
            # changing the socket type. The socket is also never closed.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.settimeout(3)  # timeout
            host = ('127.0.0.1', int(ret))
            try:  # Try connection to the host
                s.connect(host)
            except socket.error as e:
                self.logger.warn("port %s not listen" % ret)
                self.logger.warn(traceback.format_exc())
                is_exist = False
                break
        if not is_exist:
            run_cmd_by_root("/opt/epic/soc/bin/cu-streamsets-stop.sh")
            run_cmd_by_root("/opt/epic/soc/bin/cu-streamsets-start.sh")
            self.logger.info("restart streamsets")

    def monitor_status(self, monitor_times, sleep_time, pipelines_list, tick_time, retry_times):
        """Watch one getPipelines() result and try to heal bad states
        (STARTING/STOPPING stuck, START_ERROR/STOP_ERROR).

        :param monitor_times: how many times to re-check STARTING/STOPPING
        :param sleep_time: seconds between STARTING/STOPPING re-checks
        :param pipelines_list: result of streamsetsrest.getPipelines
            (falsy when the call failed); index [1] holds the status dicts
        :param tick_time: seconds between START_ERROR/STOP_ERROR retries
        :param retry_times: restart / force-stop attempts to make
        :return: names of pipelines still stuck in STARTING/STOPPING
        """
        exception_pipeline_name = []
        try:
            if pipelines_list:
                pipelines_status_config = pipelines_list[1]
                for pipeline_status in pipelines_status_config:
                    # single pipeline status monitor
                    status = pipeline_status.get("status")
                    if status == "STARTING":
                        # Logging fix: the original passed a 2-tuple as one
                        # argument to two %s placeholders, which makes the
                        # logging module raise a formatting error.
                        self.logger.info("monitor pipeline name %s status is %s",
                                         pipeline_status.get("name"), pipeline_status.get("status"))
                        for i in range(0, monitor_times):
                            # stop polling once the pipeline reaches RUNNING
                            time.sleep(sleep_time)
                            if streamsetsrest.checkPipeline_not_rest("127.0.0.1", pipeline_status.get("name")):
                                break
                            if i == monitor_times - 1:
                                exception_pipeline_name.append(pipeline_status.get("name"))
                                self.logger.info("monitor pipeline pipeline name %s status is starting ", pipeline_status.get("name"))

                    elif status == "STOPPING":
                        self.logger.info("monitor pipeline name %s status is %s",
                                         pipeline_status.get("name"), pipeline_status.get("status"))
                        for i in range(0, monitor_times):
                            # stop polling once the pipeline reaches STOPPED
                            time.sleep(sleep_time)
                            if streamsetsrest.checkPipeline_not_rest_stop("127.0.0.1", pipeline_status.get("name")):
                                break
                            if i == monitor_times - 1:
                                exception_pipeline_name.append(pipeline_status.get("name"))
                                self.logger.info("monitor pipeline pipeline name %s status is stopping  ", pipeline_status.get("name"))

                    elif status == "START_ERROR":
                        self.logger.info("monitor pipeline name %s status is START_ERROR", pipeline_status.get("name"))
                        # Retry the start several times: a failed start falls
                        # back to START_ERROR and may be started again; when
                        # conditions are OK it reaches RUNNING. Pipelines
                        # already reported once are skipped.
                        for i in range(0, retry_times):
                            if pipeline_status.get("name") in self.start_error_stop_error:
                                break
                            streamsetsrest.startPipeline(pipeline_status.get("name"))
                            time.sleep(tick_time)
                            if streamsetsrest.checkPipeline_not_rest("127.0.0.1", pipeline_status.get("name")):
                                break

                        if streamsetsrest.checkPipeline_not_rest("127.0.0.1", pipeline_status.get("name")):
                            pass
                        else:
                            # still failing after all retries -> message center
                            if pipeline_status.get("name") not in self.start_error_stop_error:
                                self.logger.error("monitor pipeline start error force start fail pipeline %s ",
                                                  pipeline_status.get("name"))
                                if self.outside or is_slave():
                                    send_message_center("START_ERROR", False)
                                else:
                                    send_message_center("START_ERROR", True)
                                self.start_error_stop_error.append(pipeline_status.get("name"))

                    elif status == "STOP_ERROR":
                        # Force-stop usually works; if it keeps failing,
                        # notify the message center once per pipeline.
                        self.logger.info("monitor pipeline name %s status is STOP_ERROR", pipeline_status.get("name"))
                        for i in range(0, retry_times):
                            if pipeline_status.get("name") in self.start_error_stop_error:
                                break
                            streamsetsrest.forceStopPipeline(pipeline_status.get("name"))
                            time.sleep(tick_time)
                            if streamsetsrest.checkPipeline_not_rest_stop("127.0.0.1", pipeline_status.get("name")):
                                break
                        # still failing after all retries -> message center
                        if streamsetsrest.checkPipeline_not_rest_stop("127.0.0.1", pipeline_status.get("name")):
                            pass
                        else:
                            if pipeline_status.get("name") not in self.start_error_stop_error:
                                # Message fix: the original logged the
                                # START_ERROR text here (copy-paste).
                                self.logger.error("monitor pipeline stop error force stop fail pipeline %s ",
                                                  pipeline_status.get("name"))
                                if self.outside or is_slave():
                                    send_message_center("STOP_ERROR", False)
                                else:
                                    send_message_center("STOP_ERROR", True)
                                self.start_error_stop_error.append(pipeline_status.get("name"))
                    # A disabled branch that restarted pipelines whose
                    # history showed them STOPPED for over an hour used to
                    # live here (see is_start_stopped_pipeline).
                    else:
                        pass
            else:
                pass
        except Exception as e:
            self.logger.error(traceback.format_exc())

        return exception_pipeline_name

    def monitor_pipeline(self):
        """Periodic (600 s) pipeline health check.

        Monitors:
          1. pipelines stuck in STARTING/STOPPING (via monitor_status),
          2. streamsets OutOfMemory occurrences in sdc.log,
          3. pipelines in START_ERROR/STOP_ERROR (via monitor_status),
        and restarts streamsets when the configured thresholds are hit.
        """
        self.logger.info("monitor pipeline start ...")
        monitor_config = read_conf()
        # restart flag
        restart_streamsets = False

        # read_conf() returns {} when the config file is missing/unreadable;
        # the original chained .get("starting_stopping").get(...) would then
        # raise AttributeError on None, so fall back to empty dicts.
        ss_conf = monitor_config.get("starting_stopping") or {}
        err_conf = monitor_config.get("start_err_and_stop_err") or {}

        exception_pipeline_name = []
        sleep_time = ss_conf.get("tick_time", 20)
        rest_times = ss_conf.get("rest_times", 20)
        tick_time = err_conf.get("tick_time", 20)
        retry_times = err_conf.get("rest_times", 20)
        all_pipeline = []
        try:
            # Check every label group plus the database pipelines belonging
            # to this collector. A failure on one label aborts the remaining
            # ones — the same behavior as the original sequential calls
            # inside a single try block.
            for label in (CONTROL, PARQUET, STORE, PREVIEW, self.collecterHash):
                pipelines = streamsetsrest.getPipelines(label, "true")
                if pipelines:
                    all_pipeline.extend(pipelines[1])
                exception_pipeline_name.extend(
                    self.monitor_status(rest_times, sleep_time, pipelines, tick_time, retry_times))
        except Exception as e:
            self.logger.error(traceback.format_exc())

        # Restart when the share of stuck pipelines crosses the threshold.
        if len(all_pipeline) > 0:
            percentage = int((len(exception_pipeline_name)/(float(len(all_pipeline))))*100)
            self.logger.info("monitor pipeline starting stopping percentage : %s ", percentage)
            if percentage >= ss_conf.get("threshold", 2):
                self.logger.info("monitor pipeline starting stopping execption , and need restart streamsets")
                restart_streamsets = True

        # Monitor the streamsets service for OOM and restart if it recurs.
        oom_flag = False
        try:
            count_oom = check_is_oom()
            filename = "/opt/epic/soc/conf/oom.json"
            if count_oom >= 1:
                if os.path.exists(filename):
                    current_time = int(time.time())
                    curr_value = read_oom_conf(filename)
                    try:
                        count = curr_value['times']
                        pre_time = curr_value['timestramp']
                        if count_oom == count:
                            # same OOM count as last check: refresh marker
                            context = {'timestramp': current_time, 'times': count_oom}
                            write_oom_conf(filename, context)
                        else:
                            # count grew: restart only if it grew within 24 h
                            need = current_time - pre_time
                            if need < 24*60*60:
                                oom_flag = True
                                context = {'timestramp': current_time, 'times': count_oom}
                                write_oom_conf(filename, context)
                    except Exception as e:
                        # marker file malformed: leave it for the next cycle
                        pass
                else:
                    # first OOM sighting: record it and restart
                    current_time = int(time.time())
                    context = {'timestramp': current_time, 'times': count_oom}
                    oom_flag = True
                    write_oom_conf(filename, context)
            else:
                if os.path.exists(filename):
                    # no OOM lines any more: drop the marker file
                    os.remove(filename)

        except Exception as e:
            self.logger.error(traceback.format_exc())

        if oom_flag:
            self.logger.info("monitor pipeline oom need restart streamsets ...")
            restart_streamsets = True

        # Decide whether streamsets (port 18630) must be restarted.
        if restart_streamsets:
            run_cmd_by_root("/opt/epic/soc/bin/cu-streamsets-stop.sh")
            time.sleep(1)
            self.logger.info("monitor pipeline start restart streamsets ...")
            run_cmd_by_root("/opt/epic/soc/bin/cu-streamsets-start.sh")
        else:
            if is_slave() or self.outside:
                pass
            else:
                # Force-stop pipelines per the policy pushed by hivemgr
                # (stored in redis). NOTE(review): the list is never
                # populated here, so this branch is currently a no-op —
                # confirm whether the redis read was lost.
                web_need_force_stop = []
                try:
                    if len(web_need_force_stop) > 0:
                        for pipeline_name in web_need_force_stop:
                            for i in xrange(0, 10):
                                streamsetsrest.forceStopPipeline(pipeline_name, "127.0.0.1")
                                time.sleep(5)
                                if streamsetsrest.checkPipeline_not_rest_stop("127.0.0.1", pipeline_name):
                                    break
                except Exception as e:
                    self.logger.error(traceback.format_exc())

        # delete pipeline history status file
        # run_cmd_by_root("find /opt/epic/soc/conf/streamsets/data/runInfo/ -name pipelineStateHistory.json |xargs rm -rf {}\;", True)
        self.logger.info("monitor pipeline end ...")
        threading.Timer(600, self.monitor_pipeline).start()

    def uploadMonitorData(self):
        """Every 10 s, push collector monitor data (and, on slave nodes,
        per-node store-rate data) to the platform."""
        try:
            self.logger.info("uploadMonitorData start")
            self._uploadCollecterMonitorData()
            if is_slave():
                self._uploadSlaveNodeMonitorData()
        except Exception:
            self.logger.error(traceback.format_exc())
        # reschedule regardless of success
        threading.Timer(10, self.uploadMonitorData).start()

    def _uploadCollecterMonitorData(self):
        """Collect per-datasource access/parse rates from the local metric
        files and POST them to the platform monitor endpoint.

        The datasource -> pipeline mapping comes from the platform
        (datasource/hash); rates are read from files under DIRECTORY via
        _getMetricsByPipelineNameAndLogsourceid.
        """
        ret = {}
        orchid = get_orchid()
        if orchid.get("/collecter/v1/dataAcessMonitor/datasource/hash", {"collector": self.collecterHash}):
            dsInfo = orchid.getResult()
            # read the streamsets metric snapshots for each datasource
            for dsUuid in dsInfo.keys():
                rateMap = {}
                ds = dsInfo.get(dsUuid)
                if ds.get("access"):
                    accessPipelineName = ds.get("access")
                    accessRate = self._getMetricsByPipelineNameAndLogsourceid(accessPipelineName, dsUuid)[0]
                    rateMap["access"] = accessRate
                if ds.get("parse"):
                    # a datasource may have several parse pipelines; sum them
                    totalCount = 0
                    totalParseRate = 0
                    pipelineNames = ds.get("parse")
                    for parsePipelineName in pipelineNames:
                        rets = self._getMetricsByPipelineNameAndLogsourceid(parsePipelineName, dsUuid)
                        parseRate = rets[0]
                        count = rets[1]
                        totalCount += count
                        totalParseRate += parseRate
                    rateMap["parse"] = totalParseRate
                    rateMap["processed"] = totalCount
                # some datasources have no access pipeline; use the parse
                # pipelines' rate instead
                if rateMap.get("access") is None:
                    rateMap["access"] = rateMap.get("parse")
                ret[dsUuid] = rateMap
            if self.outside:
                # external collector: report keyed by the collector hash only
                orchid = get_orchid()
                if orchid.post("/collecter/v1/dataAcessMonitor/report/monitor", ret,
                               {"slaveIp": self.collecterHash}):
                    self.logger.debug("调用接口成功，将监控结果上传到平台")
                else:
                    self.logger.error("调用接口失败，将监控结果上传到平台失败 异常：%s" % orchid.getError())
            else:
                # built-in collector: the key also carries the node ip
                if orchid.post("/collecter/v1/dataAcessMonitor/report/monitor", ret,
                               {"slaveIp": self.collecterHash + "_" + get_node_ip()}):
                    self.logger.debug("调用接口成功，将监控结果上传到平台")
                else:
                    self.logger.error("调用接口失败，将监控结果上传到平台失败 异常：%s" % orchid.getError())

    def _uploadSlaveNodeMonitorData(self):
        """Slave-node counterpart of _uploadCollecterMonitorData: sum the
        store rates of each datasource's pipelines and POST them to the
        platform keyed by this node's ip.
        """
        try:
            ret = {}
            orchid = get_orchid()
            if orchid.get("/collecter/v1/dataAcessMonitor/datasource/info", None):
                storeInfo = orchid.getResult()
                for dsUuid in storeInfo.keys():
                    dsStoreRate = 0
                    pipelineNames = storeInfo.get(dsUuid)
                    for pipelineName in pipelineNames:
                        storeRate = self._getMetricsByPipelineNameAndLogsourceid(pipelineName, dsUuid)[0]
                        dsStoreRate += storeRate
                    ret[dsUuid] = dsStoreRate
                if orchid.post("/collecter/v1/dataAcessMonitor/report/monitor", ret,
                               {"slaveIp": get_node_ip()}):
                    self.logger.debug("调用接口成功，将监控结果上传到平台")
                else:
                    self.logger.error("调用接口失败，将监控结果上传到平台失败 异常：%s" % orchid.getError())
        except Exception as e:
            self.logger.error(traceback.format_exc())

    def _getMetricsByPipelineNameAndLogsourceid(self, pipelineName, logSourceId):
        """Read the [rate, count] metric snapshot for one
        pipeline/datasource pair.

        The file lives at DIRECTORY + "<logSourceId>.<pipelineName>" and
        is presumably rewritten concurrently by the metric producer, so
        parse errors are retried up to 10 times, 30 ms apart.

        :return: [rate, count]; [0.0, 0] when the file does not exist or
            never parses. NOTE(review): may be [] if an unexpected error
            escapes the retry loop — callers index [0] unconditionally.
        """
        resultList = []
        try:
            monitor_path = DIRECTORY + logSourceId + "." + pipelineName
            for i in range(10):
                try:
                    if os.path.exists(monitor_path):
                        with open(monitor_path) as f:
                            meters = json.load(f)
                            rate = meters.get("rate", 0)
                            count = meters.get("count", 0)
                            resultList.append(rate)
                            resultList.append(count)
                            break
                    else:
                        # no metrics yet for this pipeline
                        resultList.append(0.0)
                        resultList.append(0)
                        break
                except Exception as e:
                    # likely caught the writer mid-write: back off briefly;
                    # give up with zeros after the last attempt
                    if i < 9:
                        time.sleep(0.03)
                    else:
                        resultList.append(0.0)
                        resultList.append(0)
                    self.logger.warn(traceback.format_exc())
        except Exception as e:
            self.logger.error(traceback.format_exc())
        return resultList

    def _syncInternalConfig(self):
        """One-shot import of the built-in pipeline definitions.

        On first run, loads every pipeline JSON under INTERNAL_PATH, skips
        the ones that do not apply to this deployment type (master LAS /
        slave / external collector), imports the rest into streamsets,
        starts them (except MOVE-labeled and "test-parse"), then removes
        the definition files and marks initialization done.
        """
        try:
            if not self.init:
                pipeline_list = fileStrList(INTERNAL_PATH)
                for pipeline in pipeline_list:
                    config = pipeline.get(PIPELINECONFIG)
                    if is_master() and not self.outside:
                        if self.check_soc():   # LAS deployments do not need these built-ins
                            if PARQUET in config.get("metadata").get("labels"):
                                continue
                            if MOVE in config.get("metadata").get("labels"):
                                continue
                            if STORE in config.get("metadata").get("labels"):
                                continue
                            if "syslog-service" == config.get(TITLE):
                                continue
                    elif is_slave():    # slave nodes do not need these built-ins
                        if PARQUET in config.get("metadata").get("labels"):
                            continue
                        if MOVE in config.get("metadata").get("labels"):
                            continue
                        if PREVIEW in config.get("metadata").get("labels"):
                            continue
                    elif self.outside:    # external collectors do not need these built-ins
                        if PARQUET in config.get("metadata").get("labels"):
                            continue
                        if MOVE in config.get("metadata").get("labels"):
                            continue
                        if STORE in config.get("metadata").get("labels"):
                            continue
                        if PREVIEW in config.get("metadata").get("labels"):
                            continue
                    streamsetsrest.importPipeline(config.get(TITLE), 'true', pipeline)
                    time.sleep(0.01)
                    # import MOVE/test-parse pipelines but do not start them
                    if MOVE in config.get("metadata").get("labels"):
                        continue
                    if "test-parse" == config.get(TITLE):
                        continue
                    streamsetsrest.startPipeline_not_rest(self.cu_ip, pipeline.get("pipelineConfig").get("title"))
                cleanConfigFile(INTERNAL_PATH)
                self.init = True
        except Exception as e:
            self.logger.error(traceback.format_exc())

    def _syncConfig(self):
        """Pull collector config files from the master and write changed ones.

        Fetches the config-file list via the orchid API, writes into
        RESOURCE_PATH any file that is new or whose MD5 differs from the
        cached value, and records every "topic" entry found in ``*.access``
        files into ``self.access_log``.  Per-file errors are logged and the
        loop continues; outer errors are logged and swallowed.
        """
        try:
            orchid = get_orchid()
            if not orchid.get("/collecter/v1/config/syncConfig", None):
                return
            for collectConfig in orchid.getResult():
                try:
                    name = collectConfig.get("name", "")
                    # Renamed from `file` to avoid shadowing the Python 2 builtin.
                    fileData = collectConfig.get('file', {})
                    fileMd5 = md5(fileData)
                    # BUG FIX: the md5 cache is keyed by *name*, but the old
                    # code looked it up with the file *content*
                    # (configMap.get(file, "")), so the comparison never
                    # matched and every file was rewritten on every pass.
                    # An unknown name yields None != fileMd5, so new files
                    # are still written.
                    if self.configMap.get(name) != fileMd5:
                        writeConfigToFile(RESOURCE_PATH, name, collectConfig.get('file', ""))
                        self.configMap[name] = fileMd5
                    if name.endswith(".access"):
                        # Parse once instead of once per key as before.
                        parsed = json.loads(collectConfig.get('file', ""))
                        for access_key in parsed.keys():
                            if "topic" in access_key:
                                self.access_log.append(parsed.get(access_key))
                except Exception as e1:
                    self.logger.error(traceback.format_exc())

        except Exception as e:
            self.logger.error(traceback.format_exc())

    def _syncPipeline(self):
        """Reconcile this node's StreamSets pipelines with the master's.

        On outside-collector or slave nodes: fetch the master's pipeline
        list, (re)import any pipeline that is new or whose uuid changed,
        mirror the master's RUNNING/STOPPED state, and remove local
        pipelines the master no longer has.  On the master itself: prune
        deletable pipelines and (re)start the control/preview/store/parquet
        groups.  All failures are logged; nothing is raised to the caller.
        """
        try:
            if self.outside or is_slave():
                socIp = get_soc_ip()
                if socIp != self.soc_ip:
                    # Remember the (possibly changed) master address.
                    self.soc_ip = socIp
                    # streamsetsrest.restart(self.cu_ip)
                masterPipelines = self._getMasterPipeline(socIp)
                # get slave pipeline status
                slavePipelinesDict = get_slave_pipelines_dict(self._getMasterPipeline(self.cu_ip))
                leastPipelines = []  # names the master still has; anything else is pruned below
                for master_pipeline in masterPipelines:
                    pipelineName = master_pipeline["name"]
                    leastPipelines.append(pipelineName)
                    try:
                        if self.masterMap.get(pipelineName) != None:
                            # Known pipeline: re-import only when its uuid changed.
                            if master_pipeline[UUID] != self.masterMap.get(pipelineName):
                                pipelineInfo = streamsetsrest.getPipeline(pipelineName, socIp)
                                try:
                                    self.masterMap[pipelineName] = pipelineInfo.get(UUID)
                                    streamsetsrest.postPipeline_not_rest(self.cu_ip, pipelineInfo)
                                except Exception as e:
                                    self.logger.warn("pipeline not exits, pipelineName: " + pipelineName)
                                    self.logger.error(traceback.format_exc())
                            else:
                                # Same uuid: only reconcile run state.
                                # NOTE(review): slavePipelinesDict.get(pipelineName)
                                # can be None when the pipeline is missing locally;
                                # the resulting AttributeError is swallowed by the
                                # enclosing try/continue -- confirm that is intended.
                                if master_pipeline['status'] == "STOPPED" and slavePipelinesDict.get(pipelineName).get('status') != "STOPPED":
                                    try:
                                        streamsetsrest.stopPipeline_not_rest(self.cu_ip, pipelineName)
                                    except Exception as e:
                                        self.logger.warn("pipeline not exits, pipelineName: " + pipelineName)
                                        self.logger.error(traceback.format_exc())
                                elif master_pipeline['status'] == 'RUNNING' and slavePipelinesDict.get(pipelineName).get('status') != "RUNNING":
                                    try:
                                        # ftp access pipelines that failed to start are
                                        # deliberately left alone instead of retried.
                                        if slavePipelinesDict.get(pipelineName).get('status') == "START_ERROR" and master_pipeline['title'].find('ftp_access') > 0:
                                            self.logger.warn("ftp pipeline start_error,continue, pipelineName: " + pipelineName)
                                            continue
                                        streamsetsrest.startPipeline_not_rest(self.cu_ip, pipelineName)
                                    except Exception as e:
                                        self.logger.warn("pipeline start, pipelineName: " + pipelineName)
                                        self.logger.error(traceback.format_exc())
                                else:
                                    pass
                        else:
                            # Unknown pipeline: stop any stale local copy, then
                            # import a fresh export from the master and start it.
                            pipelineInfo = streamsetsrest.getPipeline(pipelineName, socIp)
                            config = streamsetsrest.exportPipeline(pipelineName, socIp)
                            try:
                                streamsetsrest.stopPipeline_not_rest(self.cu_ip, pipelineName)
                            except Exception as e:
                                self.logger.warn("pipeline not exits, pipelineName: " + pipelineName)
                                self.logger.error(traceback.format_exc())
                            streamsetsrest.importPipeline(pipelineName, 'true', config, self.cu_ip)
                            streamsetsrest.startPipeline_not_rest(self.cu_ip, pipelineName)
                            self.masterMap[pipelineName] = pipelineInfo.get(UUID)
                    except Exception as e:
                        # Best-effort per pipeline: skip this one and keep syncing.
                        continue
                self._removeCuPipeline(leastPipelines)
                if not self.outside:
                    control(self.cu_ip, STORE)
                control(self.cu_ip, CONTROL)
                control(self.cu_ip, self.collecterHash)
            else:
                # Master node: clean up and (re)start the fixed pipeline groups.
                delete_pipeline(self.cu_ip)
                control(self.cu_ip, CONTROL)
                control(self.cu_ip, PREVIEW)
                control(self.cu_ip, self.collecterHash)
                control(self.cu_ip, STORE)
                control(self.cu_ip, PARQUET)
        except Exception as e:
            self.logger.error(traceback.format_exc())

    def _stopEsPipeline(self):
        """Stop every running ES pipeline once, when ``self.stopES`` is set.

        The flag is cleared only after the whole list was processed, so a
        failing fetch will retry on the next invocation.  Per-pipeline stop
        errors are logged and skipped.
        """
        try:
            if self.stopES:
                pipelines = streamsetsrest.getPipelines(ES, "true", self.cu_ip)
                pipelineStatus = pipelines[1]
                # Iterate directly instead of `for i in xrange(len(...))`:
                # clearer, and also valid on Python 3 where xrange is gone.
                for pipeline in pipelineStatus:
                    try:
                        if pipeline.get("status", "") == "RUNNING":
                            streamsetsrest.stopPipeline(pipeline.get("name"), self.cu_ip)
                    except Exception as e:
                        self.logger.error(traceback.format_exc())
                        continue
                self.stopES = False
        except Exception as e:
            self.logger.error(str(e))

    def _reloadHangoutConf(self):
        """Keep the hangout config (es.yml) in sync and restart hangout on change.

        Ensures every known access-log / db / ftp topic has a ``<topic>_parse``
        entry in the NewKafka input, that the Kafka bootstrap servers and the
        Elasticsearch output hosts match the current jdbc config, and restarts
        the hangout service whenever the file was rewritten.  No-op on outside
        collectors or when es.yml does not exist.
        """
        try:
            if self.outside:
                return
            if not os.path.exists(hangout_conf):
                return
            self.ipAndPortStr = self.jdbcconfig.get("es.ipAndPort")
            self.kafkaHost = self.jdbcconfig.get("kafka.host")
            if "," in self.ipAndPortStr:
                ipAndPorts = self.ipAndPortStr.split(",")
            else:
                ipAndPorts = [self.ipAndPortStr]
            isChange = False
            common_data = None
            if os.path.exists(common_topic):
                with open(common_topic) as f:
                    common_data = json.load(f)
            # Parse the yaml config.
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # (PyYAML >= 5.1) and unsafe on untrusted input; acceptable only
            # because es.yml is a local trusted file -- confirm.
            with open(hangout_conf) as f:
                configs = yaml.load(f)
                inputConfigs = configs.get("inputs")
                if inputConfigs and len(inputConfigs) > 0:
                    for inputConfig in inputConfigs:
                        kafkaConfigs = inputConfig.get("NewKafka")
                        if kafkaConfigs:
                            topics = kafkaConfigs.get("topic")
                            if len(self.access_log) > 0:
                                for accessLog in self.access_log:
                                    # Python 2 only: the 'string_escape' codec does
                                    # not exist on Python 3.
                                    accessLog_encode = accessLog.encode('unicode-escape').decode('string_escape')
                                    if not (accessLog_encode + "_parse") in topics.keys():
                                        # nginx/iis/apache access topics are handled
                                        # elsewhere and are deliberately not added here.
                                        if accessLog_encode.startswith("nginx_") or accessLog_encode.startswith(
                                                "iis_") or accessLog_encode.startswith("apache_"):
                                            continue
                                        else:
                                            topics[accessLog_encode + "_parse"] = 1
                                            isChange = True
                            if common_data and len(common_data) > 0:
                                db_list = common_data.get("db_topic", [])
                                for topic in db_list:
                                    # NOTE(review): membership is tested with the raw
                                    # topic but the key inserted is the encoded form;
                                    # confirm the mismatch is harmless for these names.
                                    if not (topic + "_parse") in topics:
                                        topic_encode = topic.encode('unicode-escape').decode('string_escape')
                                        topics[topic_encode + "_parse"] = 1
                                        isChange = True
                                ftp_list = common_data.get("ftp_topic", [])
                                for topic in ftp_list:
                                    if not (topic + "_parse") in topics:
                                        topic_encode = topic.encode('unicode-escape').decode('string_escape')
                                        topics[topic_encode + "_parse"] = 1
                                        isChange = True
                            consumerSettings = kafkaConfigs.get("consumer_settings")
                            if consumerSettings:
                                if consumerSettings.get("bootstrap.servers") != self.kafkaHost:
                                    # If the jdbc config has no kafka host yet, keep
                                    # whatever es.yml already points at.
                                    if self.kafkaHost is None:
                                        self.kafkaHost = consumerSettings.get("bootstrap.servers")
                                    consumerSettings["bootstrap.servers"] = self.kafkaHost
                                    isChange = True
                outputConfigs = configs.get("outputs")
                if outputConfigs:
                    for outputConfig in outputConfigs:
                        # NOTE(review): "Esocticsearch" looks like a typo for
                        # "Elasticsearch" but must match the key used in es.yml;
                        # verify before ever renaming it.
                        esConfigs = outputConfig.get("Esocticsearch")
                        if esConfigs:
                            hosts = esConfigs.get("hosts")
                            if hosts:
                                for ipInfo in ipAndPorts:
                                    # Replace the localhost placeholder with real hosts.
                                    if "127.0.0.1" in hosts:
                                        hosts.remove("127.0.0.1")
                                    if ipInfo not in hosts:
                                        hosts.append(ipInfo)
                                        isChange = True
            if isChange:
                with open(hangout_conf, 'w') as f:
                    yaml.dump(configs, f)
                self.logger.warn("the hangout service will restart!")
                run_cmd_by_root('/opt/epic/soc/bin/stop_hangout.sh')
                run_cmd_by_root('/opt/epic/soc/bin/start_hangout_service.sh')
        except Exception as e:
            self.logger.error(traceback.format_exc())

    def _isEsDeployed(self):
        """Ask the master whether Elasticsearch is deployed; False on failure."""
        orchid = get_orchid()
        if not orchid.get("/collecter/v1/dataAcessMonitor/esDeployed", None):
            return False
        return orchid.getResult()

    def _removeCuPipeline(self, newPipelines):
        """Stop and delete every tracked pipeline the master no longer has.

        ``newPipelines`` is the list of pipeline names that should survive;
        anything else in ``self.masterMap`` is stopped, deleted, and
        forgotten.  Delete failures are silently ignored (best effort).
        """
        stale = [name for name in list(self.masterMap.keys()) if name not in newPipelines]
        for name in stale:
            streamsetsrest.stopPipeline_not_rest(self.cu_ip, name)
            try:
                streamsetsrest.deletePipeline(name, self.cu_ip)
                self.masterMap.pop(name)
            except Exception as e:
                pass

    def _getMasterPipeline(self, soc_ip):
        """Collect pipeline descriptors (status merged in) from ``soc_ip``.

        Gathers the parse and access groups, the store group unless this is
        an outside collector, and the collector-hash group for outside
        nodes.  Logs and re-raises on failure so callers can distinguish a
        broken fetch from an empty result.
        """
        result = []
        try:
            result.extend(self._deal_pipeline_info(streamsetsrest.pipelines(PARSE, "", True, soc_ip)))
            result.extend(self._deal_pipeline_info(streamsetsrest.pipelines(ACCESS, "", True, soc_ip)))
            if not self.outside:
                result.extend(self._deal_pipeline_info(streamsetsrest.pipelines(STORE, "", True, soc_ip)))
            collector = self._deal_pipeline_info(streamsetsrest.pipelines(self.collecterHash, "", True, soc_ip))
            if self.outside and collector:
                result.extend(collector)
        except Exception as e:
            self.logger.warn(traceback.format_exc())
            raise e
        return result

    def _deal_pipeline_info(self, pipeline_info):
        pipelines = []
        try:
            if len(pipeline_info) == 2:
                for pipeline in pipeline_info[0]:
                    name = pipeline['name']
                    for pipeline_status in pipeline_info[1]:
                        if pipeline_status['name'] == name:
                            pipeline['status'] = pipeline_status['status']
                            break
                    pipelines.append(pipeline)
        except Exception as e:
            self.logger.warn(traceback.format_exc())
        return pipelines


    def run(self):
        """Kick off the background workers on their staggered timers."""
        # Same delays as before: scheduled sync after 60s, monitor-data
        # upload almost immediately, pipeline monitor after 10 minutes.
        for delay, worker in ((60, self.scheduled),
                              (1, self.uploadMonitorData),
                              (600, self.monitor_pipeline)):
            threading.Timer(delay, worker).start()


def main():
    """Build the resident CuSlave daemon and start its periodic workers.

    The daemon periodically syncs pipelines and configs and monitors
    pipeline health via the timers started in ``run``.
    """
    slave_logger = getLogger("slave")
    monitor = CuSlave(slave_logger)
    monitor.run()


# Run the resident slave daemon when invoked as a script.
if __name__ == '__main__':
    main()
