package com.cetc.sdp.kmga.cs.stream.nv;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.cetc.sdp.kmga.cs.audit.*;
import com.cetc.sdp.kmga.cs.common.RabbitMQConf;
import com.cetc.sdp.kmga.cs.common.TableMeta;
import com.cetc.sdp.kmga.cs.jdbc.AuditDAO;
import com.cetc.sdp.kmga.cs.jdbc.DeviceDAO;
import com.cetc.sdp.kmga.cs.stream.DeviceSetBroadcastWrapper;
import com.cetc.sdp.kmga.cs.stream.StreamUtils;
import com.cetc.sdp.kmga.cs.util.*;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.util.LongAccumulator;
import scala.Tuple2;

import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

import static com.cetc.sdp.kmga.cs.stream.StreamUtils.*;
import static com.cetc.sdp.kmga.cs.stream.nv.AuditLogStreaming.NodeDataWrapper.FTP;

/**
 * Streaming job that processes database audit logs.
 *
 * @author DengQiang
 * @date 2018/3/30 14:23
 */
public class AuditLogStreaming implements StreamWork.StreamJob {

    // job configuration: table layout, MQ settings, alert level, etc.
    private StreamJobConf conf;
    // source DStream abstraction supplying the raw log lines
    private AbstractDStream dStream;

    // names of the log fields used by this job
    private static final String CLIENT_IP = "CLIENT_IP";
    private static final String SERVER_IP = "SERVER_IP";
    private static final String CLIENT_USR = "CLIENT_USR";
    private static final String SERVER_PORT = "SERVER_PORT";
    private static final String ROW_NUM = "ROW_NUM";
    private static final String ORIG_RISK_LEV = "ORIG_RISK_LEV";
    private static final String ACT_RESULT = "ACT_RESULT";
    private static final String DB_NAME = "DB_NAME";
    private static final String TBL_NAME = "TBL_NAME";
    private static final String USR = "USR";
    private static final String TIME = "TIME";
    private static final String OPER = "OPER";
    // extra columns appended to each database audit record
    private static final byte[] RULE_ID = Bytes.toBytes("RULE_ID");
    private static final byte[] RESRC_ID = Bytes.toBytes("RESRC_ID");
    private static final byte[] AUDIT_OBJ_ID = Bytes.toBytes("AUDIT_OBJ_ID");
    private static final byte[] LEVEL = Bytes.toBytes("LEVEL");
    private static final byte[] OPER_TYPE = Bytes.toBytes("OPER_TYPE");
    private static final byte[] REPT_DEVICE_NUM = Bytes.toBytes("REPT_DEVICE_NUM");
    private static final byte[] CREATE_TIME = Bytes.toBytes("CREATE_TIME");
    private static final byte[] CLUSTER_TYPE = Bytes.toBytes("CLUSTER_TYPE");
    private static final byte[] CLUSTER_ID = Bytes.toBytes("CLUSTER_ID");

    // recognized values of the ACT_RESULT field
    private static final String SUCCESS = "success";
    private static final String ERROR = "error";
    // HBase column families used for the appended columns
    private final static byte[] FAMILY_C1 = Bytes.toBytes("C1");
    private final static byte[] FAMILY_C2 = Bytes.toBytes("C2");

    /**
     * Default window length for traffic statistics, in seconds.
     */
    private long window = 60;
    // length of the random prefix prepended to generated row ids
    private static final int RANDOM_LEN = 3;

    /**
     * Creates a streaming job bound to the given configuration and input stream.
     *
     * @param conf    job configuration
     * @param dStream source stream supplying raw audit log lines
     */
    public AuditLogStreaming(StreamJobConf conf, AbstractDStream dStream) {
        this.dStream = dStream;
        this.conf = conf;
    }

    /**
     * Sets the traffic-statistics window length.
     *
     * @param window window length in seconds; must be positive, since it is
     *               later passed to {@code Durations.seconds(window)} for both
     *               window and slide duration
     * @throws IllegalArgumentException if {@code window} is not positive
     */
    public void setWindow(long window) {
        if (window <= 0) {
            throw new IllegalArgumentException("window must be a positive number of seconds, got: " + window);
        }
        this.window = window;
    }

    /**
     * Runs the audit-log processing pipeline: parses incoming JSON log lines,
     * filters malformed records (optionally persisting them as error logs),
     * enriches each record with audit-rule match results and client-cluster
     * info, aggregates windowed traffic statistics, and persists each record
     * plus two secondary-index rows into per-month HBase tables, emitting
     * alerts for events at or above the configured risk level.
     *
     * @param context streaming runtime context (Spark contexts, broadcasts, job config)
     */
    @Override
    public void processStreaming(StreamWork.Context context) {
        Job hbaseJob = context.configJob(conf.getHBaseJobConf());
        String logType = conf.getLogType().iterator().next();
        boolean saveErrorLog = conf.saveErrorLog();
        int alertLevel = conf.getAlertLevel();
        JavaSparkContext jsc = context.getSparkContext();

        // build column-name / column-family lookup arrays from the configured field map
        Map<Integer, HTableFiled> filedMap = conf.getLogFields();
        Map<String, Integer> colIndex = new HashMap<>(filedMap.size());
        byte[][] fileds = new byte[filedMap.size()][];
        byte[][] colFamily = new byte[filedMap.size()][];
        conf.getLogFields().forEach((k, v) -> {
            fileds[k] = Bytes.toBytes(v.getName());
            colFamily[k] = Bytes.toBytes(v.getColFamily());
            colIndex.put(v.getName(), k);
        });
        TableMeta tableMeta = AuditDAO.findTableMetaByName(conf.getHTableName());
        // indexes of the mapped fields
        int clientIpIndex = colIndex.get(CLIENT_IP);
        int serverIpIndex = colIndex.get(SERVER_IP);
        int serverPortIndex = colIndex.get(SERVER_PORT);
        int rowNumIndex = colIndex.get(ROW_NUM);
        int origRiskLevIndex = colIndex.get(ORIG_RISK_LEV);
        int actResultIndex = colIndex.get(ACT_RESULT);
        int dbNameIndex = colIndex.get(DB_NAME);
        int timeIndex = colIndex.get(TIME);
        int operIndex = colIndex.get(OPER);
        int usrIndex = colIndex.get(USR);
        int tblNameIndex = colIndex.get(TBL_NAME);
        int clientUsrIndex = colIndex.get(CLIENT_USR);

        // the enriched log array carries 10 extra slots after the raw fields
        int logSize = fileds.length + 10;
        int ruleIdIdx = fileds.length;
        int resrcIdIdx = fileds.length + 1;
        int auditObjIdIdx = fileds.length + 2;
        int levelIdx = fileds.length + 3;
        int operTypeIdx = fileds.length + 4;
        int reptDeviceNumIdx = fileds.length + 5;
        int createTimeIdx = fileds.length + 6;
        int clusterTypeIdx = fileds.length + 7;
        int clusterIdIdx = fileds.length + 8;
        int idIdx = fileds.length + 9;

        RabbitMQConf mqConf = StreamUtils.createRabbitMQConf(conf.getRabbitMqConf());
        // start the audit-rule notification consumer thread
        RabbitMQConsumer auditMsgConsumer = new RabbitMQConsumer();
        mqConf.getNodes().forEach(auditMsgConsumer::addAddress);
        auditMsgConsumer.setQueue(mqConf.getMqNotifyQueue());
        auditMsgConsumer.setUserName(mqConf.getMqUsername());
        auditMsgConsumer.setPassword(mqConf.getMqPassword());
        auditMsgConsumer.setExchange(mqConf.getMqNotifyExchange());
        auditMsgConsumer.setXt(RabbitMQProducer.XT.FANOUT);
        auditMsgConsumer.addMsgListener(StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(context.getSparkContext()));
        auditMsgConsumer.addMsgListener(StreamUtils.ClientIpClusterBroadcastWrapper.getInstance(context.getSparkContext()));
        auditMsgConsumer.start();

        // initialize the device list
        List<Tuple2<String, Integer>> allDevices = DeviceDAO.findAllDevice();
        Set<String> devSet = new HashSet<>();
        allDevices.forEach(t -> devSet.add(t._1()));
        DeviceSetBroadcastWrapper deviceSetBroadcastWrapper = context.getDeviceSetBroadcastWrapper();
        deviceSetBroadcastWrapper.setInterval(conf.getReBroadcastInterval());
        deviceSetBroadcastWrapper.setUpdateByFixRate(conf.autoReBroadcast());

        // set up broadcast variables
        StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(jsc).setInterval(conf.getReBroadcastInterval());
        StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(jsc).setUpdateByFixRate(conf.autoReBroadcast());
        StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
        Broadcast<RabbitMQConf> rabbitMQConfBroadcast = jsc.broadcast(mqConf);
        Broadcast<byte[][]> fieldsBc = jsc.broadcast(fileds);
        Broadcast<byte[][]> familyBc = jsc.broadcast(colFamily);

        // fetch the initial sequence number for row-id generation
        long initialSeqValue = getInitialSeqForTable(tableMeta, LocalDate.now(Tool.zoneId), true);

        JavaDStream<Tuple2<String, String[]>> auditStream = this.dStream.getDStreamAsPlaintext(context.getStreamingContext()).transform(stream -> {
            // refresh broadcasts once per batch, on the driver
            Broadcast<AuditRuleFastMatcher> auditRuleMatcherBc = StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
            Broadcast<Map<String, Tuple2<Integer, Integer>>> ccLookupBc = StreamUtils.ClientIpClusterBroadcastWrapper.getInstance(jsc).updateAndGet();
            Broadcast<Set<String>> devsBc = DeviceSetBroadcastWrapper.getInstance(jsc, devSet).updateAndGet();
            return stream.map(log -> {
                // parse the JSON envelope, lower-casing all top-level keys
                Map<String, Object> res = new HashMap<>(48);
                try {
                    @SuppressWarnings("unchecked")
                    Map<String, Object> map = (Map<String, Object>) JSON.parse(log);
                    for (Map.Entry<String, Object> entry : map.entrySet()) {
                        res.put(entry.getKey().toLowerCase(), entry.getValue());
                    }
                } catch (Exception e) {
                    // NOTE(review): stack trace + stdout instead of a logger — consider SLF4J
                    e.printStackTrace();
                    System.out.println(Tool.DEFAULT_TIME_FORMATTER.format(LocalDateTime.now()) + ": ------------ERROR Log------------");
                    System.out.println(log);
                }
                // keep the raw line in the "__source__" field so it can be retrieved later
                res.put("__source__", log);
                return res;
            }).filter(m -> {
                // keep only records of the configured log type from known devices,
                // whose "datas" payload is a non-empty array of arrays
                Object devId = m.get("devid");
                boolean flag = logType.equals(String.valueOf(m.get("logtype")))
                        && devId != null && devsBc.getValue().contains(devId);
                if (flag) {
                    Object obj = m.get("datas");
                    if (obj instanceof JSONArray) {
                        JSONArray arr = (JSONArray) obj;
                        flag = arr.size() > 0 && arr.get(0) instanceof JSONArray;
                    } else {
                        flag = false;
                        // record the malformed log
                        if (saveErrorLog) {
                            AuditDAO.saveErrorLog(logType, (String) m.get("__source__"));
                        }
                    }
                } else {
                    // record the rejected log
                    if (saveErrorLog) {
                        AuditDAO.saveErrorLog(logType, (String) m.get("__source__"));
                    }
                }
                return flag;
            }).flatMap(m -> {
                // explode "datas" into individual records, appending devId and the
                // raw source line to the tail of each inner array
                String devId = (String) m.get("devid");
                String src = (String) m.get("__source__");
                JSONArray arr = (JSONArray) m.get("datas");
                arr.forEach(x -> {
                    ((JSONArray) x).add(devId);
                    ((JSONArray) x).add(src);
                });
                return arr.iterator();
            }).map(m -> (JSONArray) m)
                    .filter(arr -> {
                        // pop the raw source line appended above
                        String src = (String) arr.remove(arr.size() - 1);
                        // format-validity check; normalizes numeric fields in place
                        boolean flag = true;
                        try {
                            arr.set(clientIpIndex, arr.getLong(clientIpIndex));
                            arr.set(serverIpIndex, arr.getLong(serverIpIndex));
                            arr.set(serverPortIndex, arr.getInteger(serverPortIndex));
                            arr.set(rowNumIndex, arr.getInteger(rowNumIndex));
                            String riskLevelStr = arr.getString(origRiskLevIndex);
                            if (null == riskLevelStr || riskLevelStr.isEmpty()) {
                                // missing risk level defaults to 5
                                arr.set(origRiskLevIndex, 5);
                            } else {
                                int level = Integer.valueOf(riskLevelStr);
                                if (level < 0 || level > 5) {
                                    flag = false;
                                } else {
                                    arr.set(origRiskLevIndex, level);
                                }
                            }
                            String actResult = arr.getString(actResultIndex);
                            if (!SUCCESS.equalsIgnoreCase(actResult) && !ERROR.equalsIgnoreCase(actResult)) {
                                flag = false;
                            }
                        } catch (Exception e) {
                            System.out.println("日志数据格式异常:" + arr.toJSONString());
                            e.printStackTrace();
                            flag = false;
                        }
                        if (!flag) {
                            // malformed data: record the error log
                            if (saveErrorLog) {
                                AuditDAO.saveErrorLog(logType, src);
                            }
                        }
                        return flag;
                    }).map(m -> {
                        // convert numeric IPs back to dotted-string form
                        Long clientIp = m.getLong(clientIpIndex);
                        Long destIp = m.getLong(serverIpIndex);
                        if (clientIp != null) {
                            m.set(clientIpIndex, Tool.getLongIpToString(clientIp));
                        }
                        if (destIp != null) {
                            m.set(serverIpIndex, Tool.getLongIpToString(destIp));
                        }

                        ResourceAuditObj auditObj = auditRuleMatcherBc.value().getResourceId(m.getString(serverIpIndex), m.getInteger(serverPortIndex), ResourceAuditObj.AUIDIT_SRC);
                        // identify FTP: FTP audit objects are stored with port 0 in the
                        // database while incoming data carries port 21, so retry with port 0
                        if (auditObj == null && "FTP".equals(m.getString(dbNameIndex))) {
                            auditObj = auditRuleMatcherBc.value().getResourceId(m.getString(serverIpIndex), 0, ResourceAuditObj.AUIDIT_SRC);
                        }
                        String ip = m.getString(clientIpIndex);
                        String timeStr = m.getString(timeIndex);
                        LocalDateTime time = Tool.getTimeFromOrDefault(timeStr);
                        String opStr = m.getString(operIndex);

                        // initialize the result array: raw fields followed by enrichment slots
                        String[] log = new String[logSize];
                        for (int i = 0; i < logSize - 10; i++) {
                            log[i] = m.getString(i);
                        }
                        // set the reporting device (devId was appended last in flatMap)
                        log[reptDeviceNumIdx] = m.getString(m.size() - 1);
                        log[createTimeIdx] = Tool.DEFAULT_TIME_FORMATTER.format(LocalDateTime.now(Tool.zoneId));
                        // set cluster id/type from the client-IP lookup
                        Tuple2<Integer, Integer> cluster = ccLookupBc.value().get(ip);
                        if (cluster != null) {
                            log[clusterIdIdx] = String.valueOf(cluster._1());
                            log[clusterTypeIdx] = String.valueOf(cluster._2());
                        }
                        // generate the id timestamp
                        long timestamp = StreamUtils.getIdTimestamp(time);
                        String resTypeCode = "999";
                        AuditRuleMatchResult armr = null;
                        log[idIdx] = (String.valueOf(timestamp));
                        if (auditObj != null && auditObj.getResrcId() > 0) {
                            armr = auditRuleMatcherBc.getValue().match(auditObj.getResrcId(), ip, time, opStr);
                            log[resrcIdIdx] = String.valueOf(auditObj.getResrcId());
                            log[auditObjIdIdx] = String.valueOf(auditObj.getId());
                            if (auditObj.getResTypeCode() != null) {
                                resTypeCode = auditObj.getResTypeCode();
                            }
                        }

                        int riskLevel = 0;

                        if (armr != null) {
                            // a rule matched: take level/rule id from the match result
                            log[ruleIdIdx] = String.valueOf(armr.getRuleId());
                            log[levelIdx] = Integer.toString(armr.getRiskLevel());
                            log[operTypeIdx] = auditRuleMatcherBc.getValue().getOpTypeCode(opStr);
                            riskLevel = armr.getRiskLevel();
                        } else {
                            // no rule matched: derive level from the original risk level
                            Integer oriLvel = m.getInteger(origRiskLevIndex);
                            riskLevel = auditRuleMatcherBc.getValue().getRiskLevel(oriLvel);
                            if (oriLvel != null) {
                                log[levelIdx] = Integer.toString(riskLevel);
                            } else {
                                log[levelIdx] = "0";
                            }
                            log[operTypeIdx] = auditRuleMatcherBc.getValue().getOpTypeCode(opStr);
                        }
                        return new Tuple2<>(resTypeCode, log);
                    });
        }).cache();

        Broadcast<AuditRuleFastMatcher> auditRuleMatcherBc = StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
        // windowed traffic statistics (FTP records are excluded here and
        // handled in the persistence pass below)
        auditStream.mapPartitionsToPair(iterator -> {
            List<Tuple2<Tuple2<Integer, String>, AuditBriefInfo>> briefInfos = new ArrayList<>(4096);
            while (iterator.hasNext()) {
                Tuple2<String, String[]> tuple2 = iterator.next();
                String[] log = tuple2._2();
                String resrcTypeCode = tuple2._1();
                if (resrcTypeCode.equals(FTP)) {
                    continue;
                }
                AuditBriefInfo info = new AuditBriefInfo();
                info.setResrcTypeCode(resrcTypeCode);
                info.setClientIp(log[clientIpIndex]);
                String clusterID = log[clusterIdIdx];
                if (clusterID != null && !clusterID.isEmpty()) {
                    info.setClusterId(clusterID);
                }
                String opTypeCode = log[operTypeIdx];
                info.setOperTypeCode(opTypeCode);
                int resrcId = 0;
                if (log[resrcIdIdx] != null && !log[resrcIdIdx].isEmpty()) {
                    resrcId = Integer.valueOf(log[resrcIdIdx]);
                    info.setResrcId(resrcId);
                } else {
                    // records without a resource id are not counted
                    continue;
                }
                OpCode opCode = auditRuleMatcherBc.getValue().getOpCode(opTypeCode);
                if (opCode != null) {
                    String num = log[rowNumIndex];
                    // direction 0 = inbound, 1 = outbound
                    if (opCode.getDirection() == 0) {
                        info.setInAccessNum(1);
                        if (!num.isEmpty()) {
                            info.setInNum(Integer.parseInt(num));
                        }
                    } else if (opCode.getDirection() == 1) {
                        info.setOutAccessNum(1);
                        if (!num.isEmpty()) {
                            info.setOutNum(Integer.parseInt(num));
                        }
                    }
                }

                info.setSrc(0);
                briefInfos.add(new Tuple2<>(new Tuple2<>(resrcId, info.getClientIp()), info));
            }
            return briefInfos.iterator();
        }).reduceByKeyAndWindow((a, b) -> {
            // combine counts entering the window
            a.setInNum(a.getInNum() + b.getInNum());
            a.setOutNum(a.getOutNum() + b.getOutNum());
            a.setInAccessNum(a.getInAccessNum() + b.getInAccessNum());
            a.setOutAccessNum(a.getOutAccessNum() + b.getOutAccessNum());
            return a;
        }, (d, c) -> {
            // inverse function: subtract counts leaving the window
            d.setInNum(d.getInNum() - c.getInNum());
            d.setOutNum(d.getOutNum() - c.getOutNum());
            d.setInAccessNum(d.getInAccessNum() - c.getInAccessNum());
            d.setOutAccessNum(d.getOutAccessNum() - c.getOutAccessNum());
            return d;
        }, Durations.seconds(window), Durations.seconds(window))
                .foreachRDD(javaRdd -> javaRdd.foreachPartition(tuple2Iterator -> {
                    List<AuditBriefInfo> res = new ArrayList<>(4096);
                    while (tuple2Iterator.hasNext()) {
                        AuditBriefInfo info = tuple2Iterator.next()._2();
                        if (info.getResrcId() > 0 && (info.getInAccessNum() > 0 || info.getOutAccessNum() > 0)) {
                            res.add(info);
                        }
                    }
                    AuditDAO.saveAuditTrafficInfoBatch(res, LocalDateTime.now(Tool.zoneId));
                }));

        String tableQualifyName = conf.getHTableName();
        // persistence into HBase
        auditStream.foreachRDD(listJavaRDD -> {
            LongAccumulator accumulator = StreamUtils.JavaIdGenerator.getInstance(jsc, tableQualifyName, initialSeqValue);
            /**
             * Reset the sequence number at a month boundary.
             */
            LocalDate now = LocalDate.now(Tool.zoneId);
            if (now.getMonthValue() != lastDate.getMonthValue()) {
                resetAccumulator(accumulator, tableMeta.getInitial());
                lastDate = now;
            }
            // accumulator values are unreadable inside RDD transformations,
            // so snapshot the start value on the driver each batch
            long seqStart = accumulator.value();

            listJavaRDD.zipWithIndex().mapPartitionsToPair(iterator -> {
                String regex = "\\$\\{ACCT_MONTH}";
                List<AuditBriefInfo> briefInfos = new ArrayList<>(128);

                byte[][] fields = fieldsBc.getValue();
                byte[][] families = familyBc.getValue();

                List<Tuple2<ImmutableBytesWritable, Put>> res = new ArrayList<>(4096);
                StringBuilder sb = new StringBuilder();
                long size = 0;
                List<AlertReason> alertReasons = new ArrayList<>(256);
                while (iterator.hasNext()) {
                    Tuple2<Tuple2<String, String[]>, Long> data = iterator.next();
                    long seq = seqStart + data._2();
                    size++;
                    Tuple2<String, String[]> listTuple2 = data._1();
                    String splitKey = listTuple2._1();
                    String[] log = listTuple2._2();
                    // build the row id: random prefix + timestamp + sequence number
                    String id = random(RANDOM_LEN) + log[idIdx] + seq;
                    String time = log[idIdx];
                    log[idIdx] = id;

                    Put put = new Put(Bytes.toBytes(id));
                    for (int i = 0; i < fields.length; i++) {
                        if (log[i] != null && !log[i].isEmpty()) {
                            // NOTE(review): uses the driver-side 'fileds' captured by the
                            // closure instead of the broadcast 'fields' fetched above —
                            // same content, but defeats the broadcast; confirm and
                            // consider switching to 'fields[i]'
                            put.addColumn(families[i], fileds[i], Bytes.toBytes(log[i]));
                        }
                    }
                    addCol(log[ruleIdIdx], RULE_ID, put, FAMILY_C1);
                    addCol(log[resrcIdIdx], RESRC_ID, put, FAMILY_C2);
                    addCol(log[auditObjIdIdx], AUDIT_OBJ_ID, put, FAMILY_C2);
                    addCol(log[levelIdx], LEVEL, put, FAMILY_C2);
                    addCol(log[operTypeIdx], OPER_TYPE, put, FAMILY_C2);
                    addCol(log[reptDeviceNumIdx], REPT_DEVICE_NUM, put, FAMILY_C2);
                    addCol(log[createTimeIdx], CREATE_TIME, put, FAMILY_C1);
                    addCol(log[clusterIdIdx], CLUSTER_ID, put, FAMILY_C1);
                    addCol(log[clusterTypeIdx], CLUSTER_TYPE, put, FAMILY_C1);

                    put.addColumn(FAMILY_C1, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                    put.setDurability(Durability.ASYNC_WAL);

                    // derive the target month (yyyyMM) from the event time,
                    // falling back to the create time
                    String month = log[timeIndex];
                    if (month != null && !month.isEmpty()) {
                        month = month.substring(0, 4) + month.substring(5, 7);
                    } else {
                        month = log[createTimeIdx];
                        month = month.substring(0, 4) + month.substring(5, 7);
                    }
                    String targetTable = tableQualifyName.replaceFirst(regex, splitKey + "_" + month);
                    byte[] tableName = Bytes.toBytes(targetTable);
                    res.add(new Tuple2<>(new ImmutableBytesWritable(tableName), put));

                    // write the client-IP index table
                    sb.append(time)
                            .append(log[resrcIdIdx])
                            .append(NodeDataWrapper.getOrGenerateForClientIp(splitKey, log[clientIpIndex]));
                    String ctype = log[clusterTypeIdx];
                    if (null != ctype && !ctype.isEmpty()) {
                        sb.append(ctype)
                                .append(log[clusterIdIdx]);
                    } else {
                        sb.append("00000");
                    }
                    sb.append(id.substring(id.length() - 6));
                    put = new Put(Bytes.toBytes(sb.toString()));
                    put.addColumn(INDEX_COLUMN_FAMILY_BYTES, INDEX_COLUMN_ROWKEY_BYTES, Bytes.toBytes(id));
                    put.addColumn(INDEX_COLUMN_FAMILY_BYTES, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                    res.add(new Tuple2<>(new ImmutableBytesWritable(Bytes.toBytes(targetTable + "_CLIENT_IP")), put));


                    // reuse the key prefix (drop the trailing 6-char id suffix)
                    // to build the composite index row
                    sb.setLength(sb.length() - 6);
                    sb.append(NodeDataWrapper.getOrGenerateForDbAccount(splitKey, log[usrIndex]))
                            .append(NodeDataWrapper.getOrGenerateForDbName(splitKey, log[dbNameIndex]))
                            .append(NodeDataWrapper.getOrGenerateForTableName(splitKey, log[tblNameIndex]))
                            .append(log[levelIdx])
                            .append(log[operTypeIdx])
                            .append("success".equals(log[actResultIndex]) ? 1 : 0)
                            .append(id.substring(id.length() - 6));
                    put = new Put(Bytes.toBytes(sb.toString()));
                    put.addColumn(INDEX_COLUMN_FAMILY_BYTES, INDEX_COLUMN_ROWKEY_BYTES, Bytes.toBytes(id));
                    put.addColumn(INDEX_COLUMN_FAMILY_BYTES, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                    res.add(new Tuple2<>(new ImmutableBytesWritable(Bytes.toBytes(targetTable + "_INDEX")), put));
                    sb.setLength(0);

                    // alert handling: collect an AlertReason for high-risk events
                    int level = Integer.parseInt(log[levelIdx]);
                    if (level >= alertLevel) {
                        AlertReason reason = new AlertReason();
                        reason.setTime(log[timeIndex]);
                        reason.setClientIp(log[clientIpIndex]);
                        reason.setActResult(log[actResultIndex]);
                        reason.setAuditObjId(log[auditObjIdIdx]);
                        reason.setClientUsr(log[clientUsrIndex]);
                        reason.setDbName(log[dbNameIndex]);
                        reason.setDbUsr(log[usrIndex]);
                        reason.setEventId(id);
                        reason.setLevel(log[levelIdx]);
                        reason.setOpType(log[operTypeIdx]);
                        reason.setResrcId(log[resrcIdIdx]);
                        reason.setRowNum(log[rowNumIndex]);
                        reason.setRuleId(log[ruleIdIdx]);
                        reason.setTblName(log[tblNameIndex]);
                        reason.setType("0");
                        reason.setLogType(splitKey);
                        reason.setCreateTime(log[createTimeIdx]);
                        alertReasons.add(reason);
                    }

                    // application traffic statistics (FTP records only; other
                    // types are counted in the windowed pass above)
                    if (!FTP.equals(splitKey)) {
                        continue;
                    }
                    boolean filter = true;
                    AuditBriefInfo info = new AuditBriefInfo();
                    info.setResrcTypeCode(splitKey);
                    info.setClientIp(log[clientIpIndex]);
                    info.setCreateTime(log[createTimeIdx]);
                    String clusterID = log[clusterIdIdx];
                    if (clusterID != null && !clusterID.isEmpty()) {
                        info.setClusterId(clusterID);
                    }
                    String opTypeCode = log[operTypeIdx];
                    info.setOperTypeCode(opTypeCode);

                    String resrcId = log[resrcIdIdx];
                    if (resrcId != null && !resrcId.isEmpty()) {
                        info.setResrcId(Integer.valueOf(resrcId));
                    } else {
                        filter = false;
                    }
                    OpCode opCode = auditRuleMatcherBc.getValue().getOpCode(opTypeCode);
                    if (opCode != null) {
                        if (opCode.getDirection() == 0) {
                            info.setInNum(1);
                        } else if (opCode.getDirection() == 1) {
                            info.setOutNum(1);
                        }

                    }
                    info.setSrc(0);
                    info.setTime((String)log[timeIndex]);
                    if (filter) {
                        briefInfos.add(info);
                    }
                }
                // advance the shared sequence by the number of records this partition consumed
                accumulator.add(size);

                if (alertReasons.size() > 0) {
                    sendAlert(alertReasons, rabbitMQConfBroadcast.value());
                }

                if (briefInfos.size() > 0) {
                    ThreadPool.getExecutorService().submit(() -> {
                        AuditDAO.saveAuditTrafficAppInfoBatch(briefInfos);
                    });
                }

                return res.iterator();
            }).saveAsNewAPIHadoopDataset(hbaseJob.getConfiguration());

            // persist the advanced sequence value asynchronously
            ThreadPool.getExecutorService().submit(() -> {
                AuditDAO.updateHbaseRecordSeqValue(tableQualifyName, accumulator.value());
            });

            this.dStream.afterBatchProcessed();
        });
    }

    /**
     * Post-batch hook; intentionally a no-op here — batch completion is
     * signalled inside the foreachRDD at the end of {@code processStreaming}.
     */
    @Override
    public void afterProcess(StreamWork.Context context) {
//        this.dStream.afterBatchProcessed();
    }

    /**
     * Adds {@code val} to the given HBase {@link Put} under {@code family:col},
     * skipping null values and values whose string form is empty (HBase cells
     * are never written with empty payloads here).
     *
     * @param val    cell value; ignored when null or stringifies to ""
     * @param col    column qualifier bytes
     * @param put    mutation being assembled
     * @param family column family bytes
     */
    private static void addCol(Object val, byte[] col, Put put, byte[] family) {
        if (val == null) {
            return;
        }
        String text = String.valueOf(val);
        if (text.isEmpty()) {
            return;
        }
        put.addColumn(family, col, Bytes.toBytes(text));
    }

    /**
     * Most recently observed processing date, initialized to "today" in the
     * application's time zone ({@code Tool.zoneId}). Declared volatile so a
     * date rollover written by one batch thread is visible to others.
     * NOTE(review): the code that reads/updates this field is outside this
     * chunk — presumably a day-rollover check; confirm against the batch
     * processing logic.
     */
    private volatile static LocalDate lastDate = LocalDate.now(Tool.zoneId);

    /**
     * 在Executor上缓存数据
     */
    /**
     * Per-executor cache of sequence-id mappings for audit dimension values
     * (client IP, table name, DB account, DB name), one map per audited source
     * system (MySQL, HBase, Solr, MongoDB, FTP).
     *
     * <p>All maps are populated from the database at class-load time on each
     * executor JVM via {@link AuditDAO#findAllAuditHbaseType(String)}. Every
     * id handed out is also pushed onto a per-table queue that a background
     * daemon thread drains once a minute to refresh access timestamps.
     */
    protected static class NodeDataWrapper {

        private static Map<String, Integer> mysqlAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> mysqlAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> mysqlAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> mysqlAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> hbaseAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> hbaseAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> hbaseAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> hbaseAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> solrAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> solrAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> solrAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> solrAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> mongodbAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> mongodbAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> mongodbAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> mongodbAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_NAME);

        private static Map<String, Integer> ftpAuditClientIpSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_CLIENT_IP);
        private static Map<String, Integer> ftpAuditTableNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_TABLE_NAME);
        private static Map<String, Integer> ftpAuditDbAccountSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        private static Map<String, Integer> ftpAuditDbNameSeqMap = AuditDAO.findAllAuditHbaseType(AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_NAME);

        /** Per-type-table queues of ids whose access timestamp needs refreshing. */
        private static Map<String, LinkedBlockingQueue<Integer>> accessMapQueue = new ConcurrentHashMap<>(16);

        static {
            Thread thread = new Thread(getFlushTimeTask());
            thread.setName("Update Ip Access Time");
            // Daemon so this background flusher cannot keep the executor JVM
            // alive after the streaming job shuts down.
            thread.setDaemon(true);
            thread.start();
        }

        /**
         * Resource-type codes used as switch keys when dispatching to the
         * per-source cache.
         */
        protected static final String MYSQL = "001";
        protected static final String HBASE = "009";
        protected static final String SOLR = "010";
        protected static final String MONGODB = "017";
        protected static final String FTP = "016";
        protected static final String OTHER = "999";

        /**
         * Returns (or generates) the sequence id for a client IP of the given
         * source system. Null/empty IPs are normalized to the literal "null".
         *
         * @return the id, or -1 when {@code splitKey} is not a known source
         */
        public static Integer getOrGenerateForClientIp(String splitKey, String ip) {
            return resolve(splitKey, AuditDAO.AUDIT_TYPE_ID_CLIENT_IP, ip,
                    mysqlAuditClientIpSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_CLIENT_IP,
                    hbaseAuditClientIpSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_CLIENT_IP,
                    solrAuditClientIpSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_CLIENT_IP,
                    mongodbAuditClientIpSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_CLIENT_IP,
                    ftpAuditClientIpSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_CLIENT_IP);
        }

        /**
         * Returns (or generates) the sequence id for a table name of the given
         * source system. Null/empty names are normalized to the literal "null".
         *
         * @return the id, or -1 when {@code splitKey} is not a known source
         */
        public static Integer getOrGenerateForTableName(String splitKey, String tblName) {
            return resolve(splitKey, AuditDAO.AUDIT_TYPE_ID_TABLE_NAME, tblName,
                    mysqlAuditTableNameSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_TABLE_NAME,
                    hbaseAuditTableNameSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_TABLE_NAME,
                    solrAuditTableNameSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_TABLE_NAME,
                    mongodbAuditTableNameSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_TABLE_NAME,
                    ftpAuditTableNameSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_TABLE_NAME);
        }

        /**
         * Returns (or generates) the sequence id for a DB account of the given
         * source system. Null/empty accounts are normalized to the literal "null".
         *
         * @return the id, or -1 when {@code splitKey} is not a known source
         */
        public static Integer getOrGenerateForDbAccount(String splitKey, String account) {
            return resolve(splitKey, AuditDAO.AUDIT_TYPE_ID_DB_ACCOUNT, account,
                    mysqlAuditDbAccountSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                    hbaseAuditDbAccountSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                    solrAuditDbAccountSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                    mongodbAuditDbAccountSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_ACCOUNT,
                    ftpAuditDbAccountSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_ACCOUNT);
        }

        /**
         * Returns (or generates) the sequence id for a DB name of the given
         * source system. Null/empty names are normalized to the literal "null".
         *
         * @return the id, or -1 when {@code splitKey} is not a known source
         */
        public static Integer getOrGenerateForDbName(String splitKey, String dbName) {
            return resolve(splitKey, AuditDAO.AUDIT_TYPE_ID_DB_NAME, dbName,
                    mysqlAuditDbNameSeqMap, AuditDAO.MYSQL_AUDIT_TYPE_TABLE_DB_NAME,
                    hbaseAuditDbNameSeqMap, AuditDAO.HBASE_AUDIT_TYPE_TABLE_DB_NAME,
                    solrAuditDbNameSeqMap, AuditDAO.SOLR_AUDIT_TYPE_TABLE_DB_NAME,
                    mongodbAuditDbNameSeqMap, AuditDAO.MONGODB_AUDIT_TYPE_TABLE_DB_NAME,
                    ftpAuditDbNameSeqMap, AuditDAO.FTP_AUDIT_TYPE_TABLE_DB_NAME);
        }

        /**
         * Common dispatch for the four public lookup methods: normalizes the
         * content, selects the cache map and type table for the source system
         * identified by {@code splitKey}, and delegates to
         * {@link #getOrGenerate}. Unknown source codes yield -1, matching the
         * original switch-default behavior.
         */
        private static Integer resolve(String splitKey, int typeId, String content,
                                       Map<String, Integer> mysqlMap, String mysqlTable,
                                       Map<String, Integer> hbaseMap, String hbaseTable,
                                       Map<String, Integer> solrMap, String solrTable,
                                       Map<String, Integer> mongodbMap, String mongodbTable,
                                       Map<String, Integer> ftpMap, String ftpTable) {
            if (content == null || content.isEmpty()) {
                content = "null";
            }
            switch (splitKey) {
                case MYSQL:
                    return getOrGenerate(mysqlMap, mysqlTable, MYSQL, typeId, content);
                case HBASE:
                    return getOrGenerate(hbaseMap, hbaseTable, HBASE, typeId, content);
                case SOLR:
                    return getOrGenerate(solrMap, solrTable, SOLR, typeId, content);
                case MONGODB:
                    return getOrGenerate(mongodbMap, mongodbTable, MONGODB, typeId, content);
                case FTP:
                    return getOrGenerate(ftpMap, ftpTable, FTP, typeId, content);
                default:
                    return -1;
            }
        }

        /**
         * Looks up the sequence id for {@code content} in the local cache,
         * falling back to a DB lookup, and generating a fresh id when the
         * content is entirely new. Whatever id is returned is also queued so
         * the flush thread can refresh its access timestamp.
         *
         * <p>NOTE(review): when both the DB lookup returns null and id
         * generation returns -1 this loop retries indefinitely with no
         * backoff — confirm AuditDAO eventually succeeds under contention.
         */
        private static Integer getOrGenerate(Map<String, Integer> seqMap, String tableName, String dbType, int typeId, String content) {
            LinkedBlockingQueue<Integer> queue = accessMapQueue.computeIfAbsent(tableName, k -> new LinkedBlockingQueue<>());
            Integer seq = seqMap.get(content);
            if (seq == null) {
                while (true) {
                    seq = AuditDAO.findAuditHbaseTypeByContent(tableName, content);
                    if (seq == null) {
                        // Not in the DB yet: try to allocate a new id; -1 means
                        // the allocation lost a race or failed, so retry the lookup.
                        seq = AuditDAO.generateAuditId(tableName, content, typeId, dbType);
                        if (seq == -1) {
                            continue;
                        }
                    }
                    seqMap.put(content, seq);
                    break;
                }
            }
            queue.add(seq);
            return seq;
        }

        /**
         * Background task that once a minute drains every per-table access
         * queue and refreshes the access timestamps of the drained ids.
         * Exits cleanly when the thread is interrupted (interrupt status is
         * restored rather than swallowed).
         */
        public static Runnable getFlushTimeTask() {
            return () -> {
                while (!Thread.currentThread().isInterrupted()) {
                    accessMapQueue.forEach((tableName, queue) -> {
                        Set<Integer> ids = new HashSet<>();
                        queue.drainTo(ids);
                        // Skip empty batches to avoid needless DB round-trips.
                        if (!ids.isEmpty()) {
                            AuditDAO.updateAuditResSeq(tableName, ids);
                        }
                    });

                    try {
                        Thread.sleep(60000);
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag so the loop condition
                        // terminates the thread on the next check.
                        Thread.currentThread().interrupt();
                    }
                }
            };
        }
    }
}
