package com.cetc.sdp.kmga.cs.stream;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.cetc.sdp.kmga.cs.audit.*;
import com.cetc.sdp.kmga.cs.common.IndexTable;
import com.cetc.sdp.kmga.cs.common.RabbitMQConf;
import com.cetc.sdp.kmga.cs.common.TableMapping;
import com.cetc.sdp.kmga.cs.device.DevNotify;
import com.cetc.sdp.kmga.cs.jdbc.AuditDAO;
import com.cetc.sdp.kmga.cs.jdbc.DeviceDAO;
import com.cetc.sdp.kmga.cs.schema.v2.AuditDbSchemaDescV2;
import com.cetc.sdp.kmga.cs.util.*;
import com.rabbitmq.client.QueueingConsumer;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.AbstractJavaRDDLike;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.*;
import org.apache.spark.streaming.rabbitmq.RabbitMQUtils;
import org.apache.spark.streaming.rabbitmq.distributed.JavaRabbitMQDistributedKey;
import org.apache.spark.streaming.rabbitmq.models.ExchangeAndRouting;
import org.apache.spark.util.LongAccumulator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

import java.io.IOException;
import java.io.Serializable;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

import static com.cetc.sdp.kmga.cs.stream.AuditStream.NodeDataWrapper.FTP;
import static com.cetc.sdp.kmga.cs.stream.StreamUtils.*;

/**
 * @description: 数据库审计事件日志, Spark Streaming 实时流处理
 * @author： DengQiang
 * @date: 2017/8/17 16:00
 * @deprecated This class has been replaced by {@link com.cetc.sdp.kmga.cs.stream.nv.AuditStreamDriver}
 */
@Deprecated
public class AuditStream {

    private static final int ARGS_LEN = 3;
    private static final int RANDOM_LEN = 3;

    public static void main(String[] args) throws InterruptedException, IOException {
        if (args.length < ARGS_LEN) {
            System.out.println("Usage：<Application Name> <Batch duration> <window> <Receive Source, values: kafka or rabbitmq>");
            System.exit(1);
        }
        Logger logger = LoggerFactory.getLogger(AuditStream.class);
        String appName = args[0];
        int duration = Integer.valueOf(args[1]);
        int window = Integer.valueOf(args[2]);
        boolean useKafkaRecv = "kafka".equals(args[3]);
        boolean useRabbitmqRecv = "rabbitmq".equals(args[3]);

        StreamingConfiguration streamingConfiguration = new StreamingConfiguration("audit-config.xml");
        String topic = streamingConfiguration.getAppProperties().get("kafka.stream.topic");
        boolean enableAutoBroadcast = Boolean.parseBoolean(streamingConfiguration.getAppProperties().getOrDefault("enable.auto.broadcast", "false"));
        boolean enableSaveErrorLog = Boolean.parseBoolean(streamingConfiguration.getAppProperties().getOrDefault("save.error.log", "true"));
        int broadcastUpdateInterval = Integer.parseInt(streamingConfiguration.getAppProperties().getOrDefault("broadcast.interval.ms", "3600000"));
        //告警等级
        int alertLevel = Integer.parseInt(streamingConfiguration.getAppProperties().get("event.alert.level"));
        //设置rabbitmq 参数
        Map<String, String> rabbitmqProp = streamingConfiguration.getRabbitmqProperties();
        String mqHost = rabbitmqProp.get("rabbitmq.host");
        int mqPort = Integer.parseInt(rabbitmqProp.getOrDefault("rabbitmq.port", "7672"));
        String mqUsername = rabbitmqProp.get("rabbitmq.username");
        String mqPassword = rabbitmqProp.get("rabbitmq.password");
        String mqNotifyQueue = rabbitmqProp.get("rabbitmq.notify.queue");
        String mqNotifyExchange = rabbitmqProp.get("rabbitmq.notify.exchange");
        String mqDevExchange = rabbitmqProp.get("rabbitmq.dev.exchange");
        String mqAuditAlertExch = rabbitmqProp.get("rabbitmq.audit.alert.exchange");
        String mqAuditAlertQueue = rabbitmqProp.get("rabbitmq.audit.alert.queue");
        String mqLogQueue = rabbitmqProp.get("rabbitmq.log.queue");
        String mqNodeList = rabbitmqProp.get("rabbitmq.node.list");
        RabbitMQConf mqConf = new RabbitMQConf();
//        mqConf.setMqHost(mqHost);
//        mqConf.setMqPort(mqPort);
        mqConf.setMqUsername(mqUsername);
        mqConf.setMqPassword(mqPassword);
        mqConf.setMqNotifyQueue(mqNotifyQueue);
        mqConf.setMqAuditAlertQueue(mqAuditAlertQueue);
        mqConf.setMqAuditAlertExch(mqAuditAlertExch);
        String[] nodes = mqNodeList.split(",");
        for (String node : nodes) {
            String[] ip = node.split(":");
            if (ip.length == 2) {
                mqConf.addNode(ip[0], Integer.parseInt(ip[1]));
            }
        }

        TableMapping tableMapping = AuditDAO.findTableMetaInfoByTopic(topic).get();

        Map<String, String> rabbitmqParams = new HashMap<>(32);
        rabbitmqParams.put("hosts", mqNodeList);
        rabbitmqParams.put("vHost", "/");
        rabbitmqParams.put("userName", mqUsername);
        rabbitmqParams.put("password", mqPassword);
        rabbitmqParams.put("ackType", "auto");
        rabbitmqParams.put("maxReceiveTime", args[4]);
        rabbitmqParams.put("maxMessagesPerPartition", args[5]);
        rabbitmqParams.put("levelParallelism", String.valueOf(tableMapping.getNumOfPartition()));

        //初始化上下文环境
        SparkConf conf = new SparkConf().setAppName(appName);
        //使用Kryo序列化库
        conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        //在Kryo序列化库中注册自定义的类集合
        conf.set("spark.kryo.registrator", EventKryoRegistrator.class.getName());
        JavaSparkContext jsc = new JavaSparkContext(conf);
        jsc.setLogLevel("ERROR");
        JavaStreamingContext jssc = new JavaStreamingContext(jsc, Durations.seconds(duration));
        jsc.setCheckpointDir("/AuditStreamCheckPoint");

        //设置kafka参数
        Map<String, Object> kafkaParams = new HashMap<>(16);
        kafkaParams.putAll(streamingConfiguration.getKafkaProperties());

        //Config Hbase output job
        Map<String, String> hbaseMainJobs = new HashMap<>(16);
        Map<String, Map<String, IndexTable>> hbaseIndexJobs = new HashMap<>(16);
        boolean updateByMonth = tableMapping.getAutoAppendSuffix() == 1;
        tableMapping.getSplitTables().forEach((sqlitKey, tableName) -> {
            hbaseMainJobs.put(sqlitKey, tableName);
        });
        HashMap<Integer, IndexTable> indexTableHashMap = new HashMap<>(16);
        tableMapping.getSplitIndexTables().forEach((splitKey, indexTables) -> {
            Map<String, IndexTable> valMap = hbaseIndexJobs.computeIfAbsent(splitKey, k -> new HashMap<>(16));
            indexTables.forEach(indexTable -> {
                valMap.put(indexTable.getTableName(), indexTable);

                String indexTableName = indexTable.getTableName();
                if (indexTableName.endsWith("CLIENT_IP")) {
                    indexTableHashMap.put(IndexTableId.CLIENT_IP, indexTable);
                } else if (indexTableName.endsWith("INDEX")) {
                    indexTableHashMap.put(IndexTableId.INDEX, indexTable);
                }

            });
        });
        Job auidtJob = configJob(jsc, streamingConfiguration.getHbaseProperties());

        //启动审计规则消息接收线程
        RabbitMQConsumer auditMsgConsumer = new RabbitMQConsumer();
//        auditMsgConsumer.setHost(mqHost);
//        auditMsgConsumer.setPort(mqPort);
        mqConf.getNodes().forEach(auditMsgConsumer::addAddress);
        auditMsgConsumer.setQueue(mqNotifyQueue);
        auditMsgConsumer.setUserName(mqUsername);
        auditMsgConsumer.setPassword(mqPassword);
        auditMsgConsumer.setExchange(mqNotifyExchange);
        auditMsgConsumer.setXt(RabbitMQProducer.XT.FANOUT);
        auditMsgConsumer.addMsgListener(RuleFastMatcherBroadcastWrapper.getInstance(jsc));
        auditMsgConsumer.addMsgListener(ClientIpClusterBroadcastWrapper.getInstance(jsc));
        auditMsgConsumer.start();

        //初始化设备列表
        List<Tuple2<String, Integer>> allDevices = DeviceDAO.findAllDevice();
        Set<String> devSet = new HashSet<>();
        allDevices.forEach(t -> devSet.add(t._1()));
        DeviceSetBroadcastWrapper deviceSetBroadcastWrapper = DeviceSetBroadcastWrapper.getInstance(jsc, devSet);
        deviceSetBroadcastWrapper.setInterval(broadcastUpdateInterval);
        deviceSetBroadcastWrapper.setUpdateByFixRate(enableAutoBroadcast);

        //启动设备增删消息接收线程
        RabbitMQConsumer devMsgConsumer = new RabbitMQConsumer();
//        devMsgConsumer.setHost(mqHost);
//        devMsgConsumer.setPort(mqPort);
        mqConf.getNodes().forEach(devMsgConsumer::addAddress);
        devMsgConsumer.setQueue(mqNotifyQueue);
        devMsgConsumer.setUserName(mqUsername);
        devMsgConsumer.setPassword(mqPassword);
        devMsgConsumer.setExchange(mqDevExchange);
        devMsgConsumer.setXt(RabbitMQProducer.XT.FANOUT);
        devMsgConsumer.addMsgListener(message -> {
            System.out.println("Recv: " + message);
            DevNotify notify = JSON.parseObject(message, DevNotify.class);
            //设备添加标识
            String add = "Add";
            //设备删除标识
            String del = "Del";
            if (notify.getDevNums() != null) {
                if (add.equalsIgnoreCase(notify.getType())) {
                    for (String dev : notify.getDevNums()) {
                        //初始时假设设备状态为离线
                        deviceSetBroadcastWrapper.addDevice(dev);
                    }
                } else if (del.equalsIgnoreCase(notify.getType())) {
                    for (String dev : notify.getDevNums()) {
                        deviceSetBroadcastWrapper.removeDevice(dev);
                    }
                }
            }
        });
        devMsgConsumer.start();


        //设置广播变量
        RuleFastMatcherBroadcastWrapper.getInstance(jsc).setInterval(broadcastUpdateInterval);
        RuleFastMatcherBroadcastWrapper.getInstance(jsc).setUpdateByFixRate(enableAutoBroadcast);
        RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
        Broadcast<RabbitMQConf> rabbitMQConfBroadcast = LazySingletonBroadcast.getOrCreate(jsc, RabbitMQConf.class, mqConf);
        Broadcast<Integer> alertLevelBc = LazySingletonBroadcast.getOrCreate(jsc, "EVENT_ALERT_LEVEL", alertLevel);
        long initialSeqValue = getInitialSeqForTable(tableMapping, LocalDate.now(Tool.zoneId), updateByMonth);
        List<String> ts = new ArrayList<>();
        ts.add(topic);
        Broadcast<TableMapping> tableMappingBc = jsc.broadcast(tableMapping);
        Broadcast<Boolean> saveErrorLogBc = jsc.broadcast(enableSaveErrorLog);

        //处理数据库审计系统安全事件日志
        Broadcast<AuditDbSchemaDescV2> schemaDescBroadcast = LazySingletonBroadcast
                .getOrCreate(jsc, AuditDbSchemaDescV2.class, new AuditDbSchemaDescV2(tableMapping));
        HashMap<String, String> mainTable = new HashMap<>(16);
        HashMap<String, HashMap<Integer, String>> indexTable = new HashMap<>(32);
        hbaseMainJobs.forEach((splitKey, tableName) -> {
            Map<String, IndexTable> indexJobs = hbaseIndexJobs.get(splitKey);
            if (indexJobs != null) {
                HashMap<Integer, String> tmp = indexTable.computeIfAbsent(splitKey, k -> new HashMap<>(16));
                indexJobs.forEach((indexTableName, table) -> {
                    if (indexTableName.endsWith("CLIENT_IP")) {
                        tmp.put(IndexTableId.CLIENT_IP, indexTableName);
                    } else if (indexTableName.endsWith("INDEX")) {
                        tmp.put(IndexTableId.INDEX, indexTableName);
                    }
                });
            }
            mainTable.put(splitKey, tableName);
        });
        TableNameBroadcastWrapper wapper = TableNameBroadcastWrapper.getInstance(jsc);
        wapper.setData(new TableNameCache(mainTable, indexTable));
        wapper.refreshBroadcast();

        //创建输入流
        JavaDStream<String> dStream = null;
        JavaInputDStream<ConsumerRecord<Object, Object>> kafkaStream = null;
        if (useRabbitmqRecv) {
            List<JavaRabbitMQDistributedKey> distributedKeys = new LinkedList<>();
            distributedKeys.add(new JavaRabbitMQDistributedKey(tableMapping.getQueue(),
                    new ExchangeAndRouting(""),
                    rabbitmqParams));
            Function<QueueingConsumer.Delivery, String> messageHandler = (Function<QueueingConsumer.Delivery, String>) message -> new String(message.getBody());
            dStream = RabbitMQUtils.createJavaDistributedStream(jssc, String.class, distributedKeys, rabbitmqParams, messageHandler);
        } else {
            kafkaStream = KafkaUtils.createDirectStream(
                    jssc,
                    LocationStrategies.PreferConsistent(),
                    ConsumerStrategies.Subscribe(ts, kafkaParams)
            );
            dStream = tableMapping.isRepartition() ? kafkaStream.map(r -> r.value().toString()).repartition(tableMapping.getNumOfPartition())
                    : kafkaStream.map(r -> r.value().toString());
        }

        //audit stream
        JavaDStream<Tuple2<String, AuditDbSchemaDescV2.AuditDbLog>> auditStream = dStream.transform(stream -> {
            Broadcast<AuditRuleFastMatcher> auditRuleMatcherBc = RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
            Broadcast<Map<String, Tuple2<Integer, Integer>>> ccLookupBc = ClientIpClusterBroadcastWrapper.getInstance(jsc).updateAndGet();
            Broadcast<Set<String>> devsBc = DeviceSetBroadcastWrapper.getInstance(jsc, devSet).updateAndGet();
            return stream.map(record -> {
                Map<String, Object> res = new HashMap<>(48);
                try {
                    Map<String, Object> map = (Map<String, Object>) JSON.parse(record);
                    for (Map.Entry<String, Object> entry : map.entrySet()) {
                        res.put(entry.getKey().toLowerCase(), entry.getValue());
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
                //将原始数据保存在__source__字段中，以便后面取出
                res.put("__source__", record);
                return res;
            }).filter(m -> {
                String logType = tableMappingBc.getValue().getLogType();
                Object devId = m.get("devid");
                boolean flag = logType.equals(String.valueOf(m.get("logtype")))
                        && devId != null && devsBc.getValue().contains(devId);
                if (flag) {
                    Object obj = m.get("datas");
                    if (obj instanceof JSONArray) {
                        JSONArray arr = (JSONArray) obj;
                        flag = arr.size() > 0 && arr.get(0) instanceof JSONArray;
                    } else {
                        flag = false;
                        //记录错误日志
                        if (saveErrorLogBc.value()) {
                            AuditDAO.saveErrorLog(LogConstant.LogType.DB_AUDIT_SECU_EVENT_LOG, (String) m.get("__source__"));
                        }
                    }
                } else {
                    //记录错误日志
                    if (saveErrorLogBc.value()) {
                        AuditDAO.saveErrorLog(LogConstant.LogType.DB_AUDIT_SECU_EVENT_LOG, (String) m.get("__source__"));
                    }
                }
                return flag;
            }).flatMap(m -> {
                String devId = (String) m.get("devid");
                String src = (String) m.get("__source__");
                JSONArray arr = (JSONArray) m.get("datas");
                arr.forEach(x -> {
                    ((JSONArray) x).add(devId);
                    ((JSONArray) x).add(src);
                });
                return arr.iterator();
            }).map(m -> (JSONArray) m)
                    .filter(arr -> {
                        String src = (String) arr.remove(arr.size()-1);
                        AuditDbSchemaDescV2 desc = schemaDescBroadcast.value();
                        //格式异常检查
                        boolean flag = true;
                        try {
                            arr.set(desc.logFieldIndex.CLIENT_IP, arr.getLong(desc.logFieldIndex.CLIENT_IP));
                            arr.set(desc.logFieldIndex.SERVER_IP, arr.getLong(desc.logFieldIndex.SERVER_IP));
                            arr.set(desc.logFieldIndex.SERVER_PORT, arr.getInteger(desc.logFieldIndex.SERVER_PORT));
                            arr.set(desc.logFieldIndex.ROW_NUM, arr.getInteger(desc.logFieldIndex.ROW_NUM));
                            String riskLevelStr = arr.getString(desc.logFieldIndex.ORIG_RISK_LEV);
                            if (null == riskLevelStr || riskLevelStr.isEmpty()) {
                                arr.set(desc.logFieldIndex.ORIG_RISK_LEV, 5);
                            } else {
                                int level = Integer.valueOf(riskLevelStr);
                                if (level < 0 || level > 5) {
                                    flag = false;
                                } else {
                                    arr.set(desc.logFieldIndex.ORIG_RISK_LEV, level);
                                }
                            }
                            String actResult = arr.getString(desc.logFieldIndex.ACT_RESULT);
                            if (!"success".equalsIgnoreCase(actResult) && !"error".equalsIgnoreCase(actResult)) {
                                flag = false;
                            }
                        } catch (Exception e) {
                            logger.info("日志数据格式异常，{}", arr.toJSONString());
                            logger.error(e.getMessage(), e.getCause());
                            e.printStackTrace();
                            flag = false;
                        }
                        if (!flag) {
                            //数据格式错误，记录错误日志
                            if (saveErrorLogBc.value()) {
                                AuditDAO.saveErrorLog(LogConstant.LogType.DB_AUDIT_SECU_EVENT_LOG, src);
                            }
                        }
                        return flag;
                    }).map(m -> {
                        AuditDbSchemaDescV2 desc = schemaDescBroadcast.value();
                        Long clientIp = m.getLong(desc.logFieldIndex.CLIENT_IP);
                        Long destIp = m.getLong(desc.logFieldIndex.SERVER_IP);
                        if (clientIp != null) {
                            m.set(desc.logFieldIndex.CLIENT_IP, Tool.getLongIpToString(clientIp));
                        }
                        if (destIp != null) {
                            m.set(desc.logFieldIndex.SERVER_IP, Tool.getLongIpToString(destIp));
                        }

                        ResourceAuditObj auditObj = auditRuleMatcherBc.value().getResourceId(m.getString(desc.logFieldIndex.SERVER_IP), m.getInteger(desc.logFieldIndex.SERVER_PORT), ResourceAuditObj.AUIDIT_SRC);
                        //识别FTP, 由于FTP审计对象在数据库中的端口配置默认全部为0，而接收到数据中端口为21，因此特殊处理
                        if (auditObj == null && "FTP".equals(m.getString(desc.logFieldIndex.DB_NAME))) {
                            auditObj = auditRuleMatcherBc.value().getResourceId(m.getString(desc.logFieldIndex.SERVER_IP), 0, ResourceAuditObj.AUIDIT_SRC);
                        }
                        String ip = m.getString(desc.logFieldIndex.CLIENT_IP);
                        String timeStr = m.getString(desc.logFieldIndex.TIME);
                        LocalDateTime time = Tool.getTimeFromOrDefault(timeStr);
                        String opStr = m.getString(desc.logFieldIndex.OPER);

                        String devId = (String)m.remove(m.size()-1);
                        //初始化结果集
                        AuditDbSchemaDescV2.AuditDbLog log = desc.buildAuditDbLog(m);
                        //设置上报设备
                        log.setReptDeviceNum(devId);
                        log.setCreateTime(Tool.DEFAULT_TIME_FORMATTER.format(LocalDateTime.now(Tool.zoneId)));
                        //设置cluster
                        Tuple2<Integer, Integer> cluster = ccLookupBc.value().get(ip);
                        if (cluster != null) {
                            log.setClusterId(String.valueOf(cluster._1()));
                            log.setClusterType(String.valueOf(cluster._2()));
                        }
                        //生成时间戳
                        long timestamp = getIdTimestamp(time);
                        String resTypeCode = "999";
                        AuditRuleMatchResult armr = null;
                        log.setId(String.valueOf(timestamp));
                        if (auditObj != null && auditObj.getResrcId() > 0) {
                            armr = auditRuleMatcherBc.getValue().match(auditObj.getResrcId(), ip, time, opStr);
                            log.setResrcId(String.valueOf(auditObj.getResrcId()));
                            log.setAuditObjId(String.valueOf(auditObj.getId()));
                            if (auditObj.getResTypeCode() != null) {
                                resTypeCode = auditObj.getResTypeCode();
                            }
                        }

                        int riskLevel = 0;

                        if (armr != null) {
                            //规则匹配成功
                            log.setRuleId(String.valueOf(armr.getRuleId()));
                            log.setLevel(Integer.toString(armr.getRiskLevel()));
                            log.setOperType(auditRuleMatcherBc.getValue().getOpTypeCode(opStr));
                            riskLevel = armr.getRiskLevel();
                        } else {
                            //规则匹配失败
                            Integer oriLvel = m.getInteger(desc.logFieldIndex.ORIG_RISK_LEV);
                            riskLevel = auditRuleMatcherBc.getValue().getRiskLevel(oriLvel);
                            if (oriLvel != null) {
                                log.setLevel(Integer.toString(riskLevel));
                            } else {
                                log.setLevel("0");
                            }
                            log.setOperType(auditRuleMatcherBc.getValue().getOpTypeCode(opStr));
                        }
                        return new Tuple2<>(resTypeCode, log);
                    });
        }).cache();

        Broadcast<AuditRuleFastMatcher> auditRuleMatcherBc = RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
        //流量统计
        auditStream.mapPartitionsToPair(iterator -> {
            AuditDbSchemaDescV2 desc = schemaDescBroadcast.getValue();
            List<Tuple2<Tuple2<Integer, String>, AuditBriefInfo>> briefInfos = new ArrayList<>(4096);
            while (iterator.hasNext()) {
                Tuple2<String, AuditDbSchemaDescV2.AuditDbLog> tuple2 = iterator.next();
                AuditDbSchemaDescV2.AuditDbLog log = tuple2._2();
                String resrcTypeCode = tuple2._1();
                if (resrcTypeCode.equals(FTP)) {
                    continue;
                }
                AuditBriefInfo info = new AuditBriefInfo();
                info.setResrcTypeCode(resrcTypeCode);
                info.setClientIp(log.getClientIp());
                String clusterID = log.getClusterId();
                if (clusterID != null && !clusterID.isEmpty()) {
                    info.setClusterId(clusterID);
                }
                String opTypeCode = log.getOperType();
                info.setOperTypeCode(opTypeCode);
                int resrcId = 0;
                if (!log.getResrcId().isEmpty()) {
                    resrcId = Integer.valueOf(log.getResrcId());
                    info.setResrcId(resrcId);
                } else {
                    continue;
                }
                OpCode opCode = auditRuleMatcherBc.getValue().getOpCode(opTypeCode);
                if (opCode != null) {
                    String num = log.getRowNum();
                    if (opCode.getDirection() == 0) {
                        info.setInAccessNum(1);
                        if (!num.isEmpty()) {
                            info.setInNum(Integer.parseInt(num));
                        }
                    } else if (opCode.getDirection() == 1) {
                        info.setOutAccessNum(1);
                        if (!num.isEmpty()) {
                            info.setOutNum(Integer.parseInt(num));
                        }
                    }
                }

                info.setSrc(0);
                briefInfos.add(new Tuple2<>(new Tuple2<>(resrcId, info.getClientIp()), info));
            }
            return briefInfos.iterator();
        }).reduceByKeyAndWindow((a, b) -> {
            a.setInNum(a.getInNum() + b.getInNum());
            a.setOutNum(a.getOutNum() + b.getOutNum());
            a.setInAccessNum(a.getInAccessNum() + b.getInAccessNum());
            a.setOutAccessNum(a.getOutAccessNum() + b.getOutAccessNum());
            return a;
        }, (d, c) -> {
            d.setInNum(d.getInNum() - c.getInNum());
            d.setOutNum(d.getOutNum() - c.getOutNum());
            d.setInAccessNum(d.getInAccessNum() - c.getInAccessNum());
            d.setOutAccessNum(d.getOutAccessNum() - c.getOutAccessNum());
            return d;
        }, Durations.minutes(window), Durations.minutes(window))
                .foreachRDD(javaRdd -> javaRdd.foreachPartition(tuple2Iterator -> {
                    List<AuditBriefInfo> res = new ArrayList<>(4096);
                    while (tuple2Iterator.hasNext()) {
                        AuditBriefInfo info = tuple2Iterator.next()._2();
                        if (info.getResrcId() > 0 && (info.getInAccessNum() > 0 || info.getOutAccessNum() > 0)) {
                            res.add(info);
                        }
                    }
                    AuditDAO.saveAuditTrafficInfoBatch(res, LocalDateTime.now(Tool.zoneId));
                }));

        String tableQualifyName = tableMapping.getTableName();
        //入库
        auditStream.foreachRDD(listJavaRDD -> {
            LongAccumulator accumulator = JavaIdGenerator.getInstance(jsc, tableQualifyName, initialSeqValue);
            /**
             * 重置序列号
             */
            if (updateByMonth) {
                LocalDate now = LocalDate.now(Tool.zoneId);
                if (now.getMonthValue() != lastDate.getMonthValue()) {
                    resetAccumulator(accumulator, tableMapping.getInitial());
                    lastDate = now;
                }
            }
            //由于累加器在rdd转换中不可读，因此必须每次广播
            Broadcast<Long> seqStart = jsc.broadcast(accumulator.value());
            Broadcast<TableNameCache> splitMainTableB = TableNameBroadcastWrapper.getInstance(jsc).updateAndGet();
            Broadcast<HashMap<Integer, IndexTable>> indexTableB = LazySingletonBroadcast.getOrCreate(jsc, "INDEX_TABLE_BROADCAST", indexTableHashMap);

            listJavaRDD.zipWithIndex().mapPartitionsToPair(iterator -> {
                AuditDbSchemaDescV2 desc = schemaDescBroadcast.getValue();
                String regex = "\\$\\{ACCT_MONTH}";
                HashMap<String, String> mainTableNames = splitMainTableB.getValue().mainTable;
                HashMap<String, HashMap<Integer, String>> indexTableNames = splitMainTableB.getValue().indexTable;
                HashMap<Integer, IndexTable> tableIdIndexTableHashMap = indexTableB.getValue();

                List<AuditBriefInfo> briefInfos = new ArrayList<>(128);

                List<Tuple2<ImmutableBytesWritable, Put>> res = new ArrayList<>(4096);
                StringBuilder sb = new StringBuilder();
                long size = 0;
                List<AlertReason> alertReasons = new ArrayList<>(256);
                int alertLevelLocal = alertLevelBc.getValue();
                while (iterator.hasNext()) {
                    Tuple2<Tuple2<String, AuditDbSchemaDescV2.AuditDbLog>, Long> data = iterator.next();
                    long seq = seqStart.getValue() + data._2();
                    size++;
                    Tuple2<String, AuditDbSchemaDescV2.AuditDbLog> listTuple2 = data._1();
                    String splitKey = listTuple2._1();
                    AuditDbSchemaDescV2.AuditDbLog log = listTuple2._2();
                    //设置id
                    String id = random(RANDOM_LEN) + log.getId() + seq;
                    log.setId(id);

                    Put put = new Put(Bytes.toBytes(id));
                    for (int j = 0; j < desc.getFieldCount(); j++) {
                        if (j != desc.logFieldIndex.ID) {
                            TableMapping.Column col = desc.getColumnByIndex(j);
                            String val = log.get(j);
                            if (val != null && !val.isEmpty()) {
                                put.addColumn(col.getFamilyBytes(), col.getColumnNameBytes(), Bytes.toBytes(val));
                            }
                        }
                    }
                    put.addColumn(desc.getColumnByIndex(0).getFamilyBytes(), PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                    put.setDurability(Durability.ASYNC_WAL);

                    String month = log.getTime();
                    if (!month.isEmpty()) {
                        month = month.substring(0, 4) + month.substring(5, 7);
                    } else {
                        month = log.getCreateTime();
                        month = month.substring(0, 4) + month.substring(5, 7);
                    }
                    byte[] tableName = Bytes.toBytes(mainTableNames.get(splitKey).replaceFirst(regex, month));
                    res.add(new Tuple2<>(new ImmutableBytesWritable(tableName), put));

                    //写索引表
                    HashMap<Integer, String> splitIndexTable = indexTableNames.get(splitKey);
                    if (splitIndexTable != null) {
                        String indexClientIpTable = splitIndexTable.get(IndexTableId.CLIENT_IP);
                        IndexTable it = tableIdIndexTableHashMap.get(IndexTableId.CLIENT_IP);
                        if (indexClientIpTable != null) {
                            sb.append(id.substring(RANDOM_LEN, RANDOM_LEN + it.getSubkeyLen()))
                                    .append(log.getResrcId())
                                    .append(NodeDataWrapper.getOrGenerateForClientIp(splitKey, log.getClientIp()));
                            String ctype = log.getClusterType();
                            if (null != ctype && !ctype.isEmpty()) {
                                sb.append(log.getClusterType())
                                        .append(log.getClusterId());
                            } else {
                                sb.append("00000");
                            }
                            sb.append(id.substring(id.length() - it.getRandomLen()));
                            put = new Put(Bytes.toBytes(sb.toString()));
                            put.addColumn(INDEX_COLUMN_FAMILY_BYTES, INDEX_COLUMN_ROWKEY_BYTES, Bytes.toBytes(id));
                            put.addColumn(INDEX_COLUMN_FAMILY_BYTES, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                            res.add(new Tuple2<>(new ImmutableBytesWritable(Bytes.toBytes(indexClientIpTable.replaceFirst(regex, month))), put));
                            sb.setLength(0);
                        }

                        String indexIndexTable = splitIndexTable.get(IndexTableId.INDEX);
                        it = tableIdIndexTableHashMap.get(IndexTableId.INDEX);
                        if (indexIndexTable != null) {
                            sb.append(id.substring(RANDOM_LEN, RANDOM_LEN + it.getSubkeyLen()))
                                    .append(log.getResrcId())
                                    .append(NodeDataWrapper.getOrGenerateForClientIp(splitKey, log.getClientIp()));
                            String ctype = log.getClusterType();
                            if (null != ctype && !ctype.isEmpty()) {
                                sb.append(log.getClusterType())
                                        .append(log.getClusterId());
                            } else {
                                sb.append("00000");
                            }
                            sb.append(NodeDataWrapper.getOrGenerateForDbAccount(splitKey, log.getUsr()))
                                    .append(NodeDataWrapper.getOrGenerateForDbName(splitKey, log.getDbName()))
                                    .append(NodeDataWrapper.getOrGenerateForTableName(splitKey, log.getTblName()))
                                    .append(log.getLevel())
                                    .append(log.getOperType())
                                    .append("success".equals(log.getActResult()) ? 1 : 0)
                                    .append(id.substring(id.length() - it.getRandomLen()));
                            put = new Put(Bytes.toBytes(sb.toString()));
                            put.addColumn(INDEX_COLUMN_FAMILY_BYTES, INDEX_COLUMN_ROWKEY_BYTES, Bytes.toBytes(id));
                            put.addColumn(INDEX_COLUMN_FAMILY_BYTES, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                            res.add(new Tuple2<>(new ImmutableBytesWritable(Bytes.toBytes(indexIndexTable.replaceFirst(regex, month))), put));
                            sb.setLength(0);
                        }
                    }

                    //告警处理
                    int level = Integer.parseInt(log.getLevel());
                    if (level >= alertLevelLocal) {
                        AlertReason reason = new AlertReason();
                        reason.setTime(log.getTime());
                        reason.setClientIp(log.getClientIp());
                        reason.setActResult(log.getActResult());
                        reason.setAuditObjId(log.getAuditObjId());
                        reason.setClientUsr(log.getClientUsr());
                        reason.setDbName(log.getDbName());
                        reason.setDbUsr(log.getUsr());
                        reason.setEventId(id);
                        reason.setLevel(log.getLevel());
                        reason.setOpType(log.getOperType());
                        reason.setResrcId(log.getResrcId());
                        reason.setRowNum(log.getRowNum());
                        reason.setRuleId(log.getRuleId());
                        reason.setTblName(log.getTblName());
                        reason.setType("0");
                        reason.setLogType(splitKey);
                        reason.setCreateTime(log.getCreateTime());
                        alertReasons.add(reason);
                    }

                    //应用系统流量统计
                    if (!FTP.equals(splitKey)) {
                        continue;
                    }
                    boolean filter = true;
                    AuditBriefInfo info = new AuditBriefInfo();
                    info.setResrcTypeCode(splitKey);
                    info.setClientIp(log.getClientIp());
                    info.setCreateTime(log.getCreateTime());
                    String clusterID = log.getClusterId();
                    if (clusterID != null && !clusterID.isEmpty()) {
                        info.setClusterId(clusterID);
                    }
                    String opTypeCode = log.getOperType();
                    info.setOperTypeCode(opTypeCode);

                    if (!log.getResrcId().isEmpty()) {
                        info.setResrcId(Integer.valueOf(log.getResrcId()));
                    } else {
                        filter = false;
                    }
                    OpCode opCode = auditRuleMatcherBc.getValue().getOpCode(opTypeCode);
                    if (opCode != null) {
                        if (opCode.getDirection() == 0) {
                            info.setInNum(1);
                        } else if (opCode.getDirection() == 1) {
                            info.setOutNum(1);
                        }

                    }
                    info.setSrc(0);
                    info.setTime(log.getTime());
                    if (filter) {
                        briefInfos.add(info);
                    }
                }
                accumulator.add(size);

                if (alertReasons.size() > 0) {
                    sendAlert(alertReasons, rabbitMQConfBroadcast.value());
                }

                if (briefInfos.size() > 0) {
                    ThreadPool.getExecutorService().submit(() -> {
                        AuditDAO.saveAuditTrafficAppInfoBatch(briefInfos);
                    });
                }

                return res.iterator();
            }).saveAsNewAPIHadoopDataset(auidtJob.getConfiguration());

            ThreadPool.getExecutorService().submit(() -> {
                AuditDAO.updateHbaseRecordSeqValue(tableMapping.getTableName(), accumulator.value());
            });

        });

        //update kafka offset
        if (kafkaStream != null) {
            CanCommitOffsets offsets = (CanCommitOffsets)kafkaStream.inputDStream();
            kafkaStream.foreachRDD(javaRDD -> {
                OffsetRange[] offsetRanges = ((HasOffsetRanges) javaRDD.rdd()).offsetRanges();
                offsets.commitAsync(offsetRanges);
            });
        }
        jssc.start();
        jssc.awaitTermination();
        jssc.close();
    }

    /**
     * Date of the most recent update (最近更新日期), evaluated in the
     * application time zone {@code Tool.zoneId}.
     */
    private volatile static LocalDate lastDate = LocalDate.now(Tool.zoneId);

    /**
     * 在Executor上缓存数据
     */
    /**
     * Per-executor cache of dictionary sequence ids: maps a raw value
     * (client ip / table name / db account / db name) to the integer sequence
     * id stored in the corresponding AuditDAO lookup table, creating the id
     * on first sight. Accessed ids are queued and a background thread
     * periodically refreshes their access timestamps in the database.
     *
     * NOTE(review): the Map instances returned by AuditDAO are read and
     * written from Spark task threads without synchronization — confirm they
     * are concurrent maps, or accept the benign race of re-resolving an id.
     */
    protected static class NodeDataWrapper {

        private static Map<String, Integer> mysqlAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> mysqlAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> mysqlAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> mysqlAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> hbaseAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> hbaseAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> hbaseAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> hbaseAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> solrAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> solrAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> solrAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> solrAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> mongodbAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> mongodbAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> mongodbAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> mongodbAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> ftpAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> ftpAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> ftpAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> ftpAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_NAME);

        /**
         * Per lookup-table queue of sequence ids that were accessed since the
         * last flush; drained by the background flush thread.
         */
        private static Map<String, LinkedBlockingQueue<Integer>> accessMapQueue = new ConcurrentHashMap<>(16);

        static {
            Thread thread = new Thread(getFlushTimeTask());
            thread.setName("Update Ip Access Time");
            // Daemon: this is housekeeping only and must never prevent the
            // executor JVM from shutting down.
            thread.setDaemon(true);
            thread.start();
        }

        /**
         * Resource type codes (split keys) identifying the audited data source.
         */
        protected static final String MYSQL = "001";
        protected static final String HBASE = "009";
        protected static final String SOLR = "010";
        protected static final String MONGODB = "017";
        protected static final String FTP = "016";

        /**
         * Resolves (or creates) the sequence id of a client ip for the given
         * resource type. A null/empty ip is normalized to the literal "null".
         *
         * @param splitKey resource type code (one of {@link #MYSQL} ... {@link #FTP})
         * @param ip       client ip value; may be null or empty
         * @return the sequence id, or -1 for an unknown {@code splitKey}
         */
        public static Integer getOrGenerateForClientIp(String splitKey, String ip) {
            if (ip == null || ip.isEmpty()) {
                ip = "null";
            }
            Integer seq = -1;
            switch (splitKey) {
                case MYSQL:
                    seq = getOrGenerate(mysqlAuditClientIpSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_CLIENT_IP,
                            MYSQL, AuditDAO.AUDIT_TYPE_ID_CLIENT_IP, ip);
                    break;
                case HBASE:
                    seq = getOrGenerate(hbaseAuditClientIpSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_CLIENT_IP,
                            HBASE, AuditDAO.AUDIT_TYPE_ID_CLIENT_IP, ip);
                    break;
                case SOLR:
                    seq = getOrGenerate(solrAuditClientIpSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_CLIENT_IP,
                            SOLR, AuditDAO.AUDIT_TYPE_ID_CLIENT_IP, ip);
                    break;
                case MONGODB:
                    seq = getOrGenerate(mongodbAuditClientIpSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_CLIENT_IP,
                            MONGODB, AuditDAO.AUDIT_TYPE_ID_CLIENT_IP, ip);
                    break;
                case FTP:
                    seq = getOrGenerate(ftpAuditClientIpSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_CLIENT_IP,
                            FTP, AuditDAO.AUDIT_TYPE_ID_CLIENT_IP, ip);
                    break;
                default:
                    break;
            }
            return seq;
        }

        /**
         * Resolves (or creates) the sequence id of a table name for the given
         * resource type. A null/empty name is normalized to the literal "null".
         *
         * @param splitKey resource type code
         * @param tblName  table name; may be null or empty
         * @return the sequence id, or -1 for an unknown {@code splitKey}
         */
        public static Integer getOrGenerateForTableName(String splitKey, String tblName) {
            if (tblName == null || tblName.isEmpty()) {
                tblName = "null";
            }
            Integer seq = -1;
            switch (splitKey) {
                case MYSQL:
                    seq = getOrGenerate(mysqlAuditTableNameSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_TABLE_NAME,
                            MYSQL, AuditDAO.AUDIT_TYPE_ID_TABLE_NAME, tblName);
                    break;
                case HBASE:
                    seq = getOrGenerate(hbaseAuditTableNameSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_TABLE_NAME,
                            HBASE, AuditDAO.AUDIT_TYPE_ID_TABLE_NAME, tblName);
                    break;
                case SOLR:
                    seq = getOrGenerate(solrAuditTableNameSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_TABLE_NAME,
                            SOLR, AuditDAO.AUDIT_TYPE_ID_TABLE_NAME, tblName);
                    break;
                case MONGODB:
                    seq = getOrGenerate(mongodbAuditTableNameSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_TABLE_NAME,
                            MONGODB, AuditDAO.AUDIT_TYPE_ID_TABLE_NAME, tblName);
                    break;
                case FTP:
                    seq = getOrGenerate(ftpAuditTableNameSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_TABLE_NAME,
                            FTP, AuditDAO.AUDIT_TYPE_ID_TABLE_NAME, tblName);
                    break;
                default:
                    break;
            }
            return seq;
        }

        /**
         * Resolves (or creates) the sequence id of a database account for the
         * given resource type. A null/empty account is normalized to "null".
         *
         * @param splitKey resource type code
         * @param account  database account; may be null or empty
         * @return the sequence id, or -1 for an unknown {@code splitKey}
         */
        public static Integer getOrGenerateForDbAccount(String splitKey, String account) {
            if (account == null || account.isEmpty()) {
                account = "null";
            }
            Integer seq = -1;
            switch (splitKey) {
                case MYSQL:
                    seq = getOrGenerate(mysqlAuditDbAccountSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                            MYSQL, AuditDAO.AUDIT_TYPE_ID_DB_ACCOUNT, account);
                    break;
                case HBASE:
                    seq = getOrGenerate(hbaseAuditDbAccountSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                            HBASE, AuditDAO.AUDIT_TYPE_ID_DB_ACCOUNT, account);
                    break;
                case SOLR:
                    seq = getOrGenerate(solrAuditDbAccountSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                            SOLR, AuditDAO.AUDIT_TYPE_ID_DB_ACCOUNT, account);
                    break;
                case MONGODB:
                    seq = getOrGenerate(mongodbAuditDbAccountSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                            MONGODB, AuditDAO.AUDIT_TYPE_ID_DB_ACCOUNT, account);
                    break;
                case FTP:
                    seq = getOrGenerate(ftpAuditDbAccountSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                            FTP, AuditDAO.AUDIT_TYPE_ID_DB_ACCOUNT, account);
                    break;
                default:
                    break;
            }
            return seq;
        }

        /**
         * Resolves (or creates) the sequence id of a database name for the
         * given resource type. A null/empty name is normalized to "null".
         *
         * @param splitKey resource type code
         * @param dbName   database name; may be null or empty
         * @return the sequence id, or -1 for an unknown {@code splitKey}
         */
        public static Integer getOrGenerateForDbName(String splitKey, String dbName) {
            if (dbName == null || dbName.isEmpty()) {
                dbName = "null";
            }
            Integer seq = -1;
            switch (splitKey) {
                case MYSQL:
                    seq = getOrGenerate(mysqlAuditDbNameSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_NAME,
                            MYSQL, AuditDAO.AUDIT_TYPE_ID_DB_NAME, dbName);
                    break;
                case HBASE:
                    seq = getOrGenerate(hbaseAuditDbNameSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_NAME,
                            HBASE, AuditDAO.AUDIT_TYPE_ID_DB_NAME, dbName);
                    break;
                case SOLR:
                    seq = getOrGenerate(solrAuditDbNameSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_NAME,
                            SOLR, AuditDAO.AUDIT_TYPE_ID_DB_NAME, dbName);
                    break;
                case MONGODB:
                    seq = getOrGenerate(mongodbAuditDbNameSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_NAME,
                            MONGODB, AuditDAO.AUDIT_TYPE_ID_DB_NAME, dbName);
                    break;
                case FTP:
                    seq = getOrGenerate(ftpAuditDbNameSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_NAME,
                            FTP, AuditDAO.AUDIT_TYPE_ID_DB_NAME, dbName);
                    break;
                default:
                    break;
            }
            return seq;
        }

        /**
         * Looks the content up in the local cache, then in the database, and
         * finally generates a new id; retries until the DB lookup or the id
         * generation succeeds. Every resolved id is queued so its access
         * timestamp gets refreshed by the flush thread.
         *
         * @param seqMap    local content -> id cache for one lookup table
         * @param tableName lookup table name
         * @param dbType    resource type code
         * @param typeId    audit type id (column discriminator)
         * @param content   normalized content to resolve
         * @return the resolved sequence id (never null)
         */
        private static Integer getOrGenerate(Map<String, Integer> seqMap, String tableName, String dbType, int typeId, String content) {
            Integer seq = seqMap.get(content);
            LinkedBlockingQueue<Integer> queue = accessMapQueue.computeIfAbsent(tableName, k -> new LinkedBlockingQueue<>());
            if (seq == null) {
                // Retry loop: between our lookup and our insert another worker
                // may have created the row, so re-read before giving up.
                while (true) {
                    seq = AuditDAO.findAuditHbaseTypeByContent(tableName, content);
                    if (seq != null) {
                        seqMap.put(content, seq);
                        queue.add(seq);
                        return seq;
                    } else {
                        seq = AuditDAO.generateAuditId(tableName, content, typeId, dbType);
                        if (seq != -1) {
                            seqMap.put(content, seq);
                            queue.add(seq);
                            return seq;
                        }
                    }
                }
            } else {
                queue.add(seq);
                return seq;
            }
        }

        /**
         * Background flush loop: once a minute drains the queues of accessed
         * sequence ids and refreshes their access timestamps in the database.
         * Exits (restoring the interrupt flag) when the thread is interrupted.
         *
         * @return the flush loop runnable
         */
        public static Runnable getFlushTimeTask() {
            return () -> {
                while (true) {
                    accessMapQueue.forEach((tableName, queue) -> {
                        int size = queue.size();
                        Set<Integer> val = new HashSet<>(size);
                        for (int i = 0; i < size; i++) {
                            Integer seq = queue.poll();
                            if (seq == null) {
                                // Queue drained faster than expected; stop.
                                break;
                            }
                            val.add(seq);
                        }
                        if (!val.isEmpty()) {
                            AuditDAO.updateAuditResSeq(tableName, val);
                        }
                    });

                    try {
                        Thread.sleep(60000);
                    } catch (InterruptedException e) {
                        // Do not swallow the interrupt: restore the flag and
                        // terminate the loop so the thread can shut down.
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            };
        }
    }


    /**
     * 广播变量Wrapper
     * 客户端采集集群查找表，key客户端ip，值为Tuple，其中第0个值是集群id, 第1值是集群类型
     */
    /**
     * Broadcast-variable wrapper for the client-side collection cluster
     * lookup table. Key: client ip; value: Tuple2 of (cluster id, cluster
     * type). Refreshes the broadcast when a matching MQ message arrives.
     */
    private static class ClientIpClusterBroadcastWrapper extends AbstractBroadcastWrapper<Map<String, Tuple2<Integer, Integer>>> implements MessageListener {

        private static final Logger LOG = LoggerFactory.getLogger(ClientIpClusterBroadcastWrapper.class);

        private static volatile ClientIpClusterBroadcastWrapper INSTANCE;

        private ClientIpClusterBroadcastWrapper(JavaSparkContext jsc) {
            super(jsc);
        }

        /** Messages whose "type" starts with this prefix trigger a broadcast refresh. */
        private static final String CLIENT_IP_CLUSTER_MSG_TYPE_PREFIX = "ClientIpCluster";

        /**
         * Double-checked-locking singleton accessor.
         *
         * @param jsc Spark context used to create the broadcast
         * @return the shared wrapper instance
         */
        static ClientIpClusterBroadcastWrapper getInstance(JavaSparkContext jsc) {
            if (INSTANCE == null) {
                synchronized (ClientIpClusterBroadcastWrapper.class) {
                    if (INSTANCE == null) {
                        INSTANCE = new ClientIpClusterBroadcastWrapper(jsc);
                    }
                }
            }
            return INSTANCE;
        }

        /**
         * Loads the full client -> cluster table from the DB and broadcasts
         * it as a Map of ip -> (cluster id, cluster type).
         */
        @Override
        protected Broadcast<Map<String, Tuple2<Integer, Integer>>> broadcast(JavaSparkContext jsc) {
            Map<String, int[]> map = AuditDAO.findAllClientCluster();
            Map<String, Tuple2<Integer, Integer>> bmap = new HashMap<>(map.size());
            map.forEach((key, value) -> {
                bmap.put(key, new Tuple2<>(value[0], value[1]));
            });
            return jsc.broadcast(bmap);
        }

        /**
         * MQ callback: refreshes the broadcast when the message's "type"
         * field starts with {@link #CLIENT_IP_CLUSTER_MSG_TYPE_PREFIX}.
         * Malformed messages are logged and ignored.
         *
         * @param message raw JSON message body
         */
        @Override
        public void fireMessage(String message) {
            LOG.info("Recv :{}", message);
            try {
                Map<String, Object> msgObj = (Map<String, Object>) JSON.parse(message);
                Object type = msgObj.get("type");
                // String.valueOf(null) would yield the literal "null", so
                // test the raw value for null before converting.
                if (type != null && String.valueOf(type).startsWith(CLIENT_IP_CLUSTER_MSG_TYPE_PREFIX)) {
                    refreshBroadcast();
                }
            } catch (Exception e) {
                LOG.error("Failed to handle client-ip cluster message: {}", message, e);
            }
        }
    }

    /**
     * Broadcast-variable wrapper for the HBase table-name cache. The cache
     * must be supplied via {@link #setData(TableNameCache)} before the
     * broadcast is first materialized.
     */
    private static class TableNameBroadcastWrapper extends AbstractBroadcastWrapper<TableNameCache> {

        private static volatile TableNameBroadcastWrapper INSTANCE;

        private TableNameBroadcastWrapper(JavaSparkContext jsc) {
            super(jsc);
        }

        // volatile: setData() may run on a different thread than the one
        // that later materializes the broadcast; guarantee visibility.
        private volatile TableNameCache tableNameCache;

        /**
         * Double-checked-locking singleton accessor.
         *
         * @param jsc Spark context used to create the broadcast
         * @return the shared wrapper instance
         */
        static TableNameBroadcastWrapper getInstance(JavaSparkContext jsc) {
            if (INSTANCE == null) {
                synchronized (TableNameBroadcastWrapper.class) {
                    if (INSTANCE == null) {
                        INSTANCE = new TableNameBroadcastWrapper(jsc);
                    }
                }
            }
            return INSTANCE;
        }

        /**
         * Installs the cache to be broadcast on the next (re)broadcast.
         *
         * @param tableNameCache cache of main/index table names
         */
        public void setData(TableNameCache tableNameCache) {
            this.tableNameCache = tableNameCache;
        }

        @Override
        protected Broadcast<TableNameCache> broadcast(JavaSparkContext jsc) {
            return jsc.broadcast(tableNameCache);
        }
    }

    /**
     * Serializable cache of HBase table names, shipped to executors via a
     * Spark broadcast. Fields are intentionally public for direct access by
     * the streaming pipeline.
     */
    public static class TableNameCache implements Serializable {

        // Pin the serialized form: this class travels through Spark's
        // broadcast serialization, so an implicit UID would break on any
        // recompile-time class change.
        private static final long serialVersionUID = 1L;

        /**
         * Main tables: key is the split_key (resource type code), value is
         * the table name.
         */
        public HashMap<String, String> mainTable;
        /**
         * Index tables: key is the split_key, value maps index-table id
         * (see {@code IndexTableId}) to the index table name.
         */
        public HashMap<String, HashMap<Integer, String>> indexTable;

        public TableNameCache(HashMap<String, String> mainTable, HashMap<String, HashMap<Integer, String>> indexTable) {
            this.mainTable = mainTable;
            this.indexTable = indexTable;
        }

        public TableNameCache() {
        }
    }

    /**
     * Identifiers of the secondary index tables referenced by
     * {@code TableNameCache.indexTable}.
     */
    public static class IndexTableId implements Serializable {

        // Pin the serialized form; the class implements Serializable and may
        // be shipped inside broadcast structures.
        private static final long serialVersionUID = 1L;

        /** Index keyed by client ip. */
        public static final int CLIENT_IP = 0;
        /** Composite (account/db/table/level/op/result) index. */
        public static final int INDEX = 1;
    }
}
