package com.cetc.sdp.kmga.cs.stream;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.cetc.sdp.kmga.cs.audit.AlertReason;
import com.cetc.sdp.kmga.cs.audit.AuditRuleFastMatcher;
import com.cetc.sdp.kmga.cs.audit.AuditRuleMatchResult;
import com.cetc.sdp.kmga.cs.audit.ResourceAuditObj;
import com.cetc.sdp.kmga.cs.common.IndexTable;
import com.cetc.sdp.kmga.cs.common.RabbitMQConf;
import com.cetc.sdp.kmga.cs.common.TableMapping;
import com.cetc.sdp.kmga.cs.device.DevNotify;
import com.cetc.sdp.kmga.cs.jdbc.AuditDAO;
import com.cetc.sdp.kmga.cs.jdbc.DeviceDAO;
import com.cetc.sdp.kmga.cs.schema.v2.*;
import com.cetc.sdp.kmga.cs.util.*;
import com.rabbitmq.client.QueueingConsumer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.*;
import org.apache.spark.streaming.rabbitmq.RabbitMQUtils;
import org.apache.spark.streaming.rabbitmq.distributed.JavaRabbitMQDistributedKey;
import org.apache.spark.streaming.rabbitmq.models.ExchangeAndRouting;
import org.apache.spark.util.LongAccumulator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

import java.io.IOException;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.*;

import static com.cetc.sdp.kmga.cs.stream.StreamUtils.*;

/**
 * @description: 事件日志, Spark Streaming 实时流处理
 * @author： DengQiang
 * @date: 2017/8/17 16:00
 * @deprecated This class has been replaced by {@link com.cetc.sdp.kmga.cs.stream.nv.EventStreamDriver}
 */
@Deprecated
public class EventStream {

    /** Minimum number of required command-line arguments (see usage message in {@code main}). */
    private static final int ARGS_LEN = 3;
    /**
     * Whether audit-rule matching is performed on desensitization logs.
     * Defaults to {@code false}; may be overridden by the 5th command-line argument ("true").
     */
    private static boolean ENABLE_AUDIT = false;
    /**
     * Kafka offset ranges of the most recently received batch.
     * Declared {@code volatile} so updates made on streaming callback threads are
     * visible to other threads; NOTE(review): assigned outside the visible portion
     * of this file — confirm the writer before relying on its lifecycle.
     */
    private static volatile OffsetRange[] offsetRanges = null;

    public static void main(String[] args) throws InterruptedException, IOException {
        if (args.length < ARGS_LEN) {
            System.out.println("Usage：<Application Name> <Batch duration> <window> <Receive Source, values: kafka or rabbitmq> <enable audit>");
            System.exit(1);
        }
        Logger logger = LoggerFactory.getLogger(EventStream.class);
        String appName = args[0];
        int duration = Integer.parseInt(args[1]);
        int window = Integer.parseInt(args[2]);
        boolean useKafkaRecv = "kafka".equals(args[3]);
        boolean useRabbitmqRecv = "rabbitmq".equals(args[3]);
        if (args.length > 4) {
            ENABLE_AUDIT = "true".equals(args[4]);
        }

        StreamingConfiguration streamingConfiguration = new StreamingConfiguration("event-config.xml");
        String groupIds = streamingConfiguration.getAppProperties().get("kafka.group.ids");
        String topics = streamingConfiguration.getAppProperties().get("kafka.stream.topics");
        boolean enableAutoBroadcast = Boolean.parseBoolean(streamingConfiguration.getAppProperties().getOrDefault("enable.auto.broadcast", "false"));
        boolean enableSaveErrorLog = Boolean.parseBoolean(streamingConfiguration.getAppProperties().getOrDefault("save.error.log", "true"));
        int broadcastUpdateInterval = Integer.parseInt(streamingConfiguration.getAppProperties().getOrDefault("broadcast.interval.ms", "3600000"));
        //告警等级
        int alertLevel = Integer.parseInt(streamingConfiguration.getAppProperties().get("event.alert.level"));
        //设置rabbitmq 参数
        String mqHost = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.host");
        int mqPort = Integer.parseInt(streamingConfiguration.getRabbitmqProperties().get("rabbitmq.port"));
        String mqUsername = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.username");
        String mqPassword = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.password");
        String mqNotifyQueue = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.notify.queue");
        String mqNotifyExchange = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.notify.exchange");
        String mqDevExchange = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.dev.exchange");
        String mqAuditAlertExch = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.audit.alert.exchange");
        String mqAuditAlertQueue = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.audit.alert.queue");
        String mqLogQueues = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.log.queues");
        String mqNodeList = streamingConfiguration.getRabbitmqProperties().get("rabbitmq.node.list");
        RabbitMQConf mqConf = new RabbitMQConf();
//        mqConf.setMqHost(mqHost);
//        mqConf.setMqPort(mqPort);
        mqConf.setMqUsername(mqUsername);
        mqConf.setMqPassword(mqPassword);
        mqConf.setMqNotifyQueue(mqNotifyQueue);
        mqConf.setMqAuditAlertQueue(mqAuditAlertQueue);
        mqConf.setMqAuditAlertExch(mqAuditAlertExch);
        String[] nodes = mqNodeList.split(",");
        for (String node : nodes) {
            String[] ip = node.split(":");
            if (ip.length == 2) {
                mqConf.addNode(ip[0], Integer.parseInt(ip[1]));
            }
        }

        //初始化上下文环境
        SparkConf conf = new SparkConf().setAppName(appName);
        //使用Kryo序列化库
        conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        //在Kryo序列化库中注册自定义的类集合
        conf.set("spark.kryo.registrator", EventKryoRegistrator.class.getName());
        JavaSparkContext jsc = new JavaSparkContext(conf);
        jsc.setLogLevel("ERROR");
        JavaStreamingContext jssc = new JavaStreamingContext(jsc, Durations.seconds(duration));
        jsc.setCheckpointDir("/EventStreamCheckPoint");

        //设置kafka参数
        Set<String> topicsSet = new HashSet<>();
        String[] topicArr = topics.split(",");
        String[] groupArr = groupIds.split(",");
        Map<String, Map<String, Object>> kps = new HashMap<>(16);
        for (int i = 0; i < topicArr.length; i++) {
            String topic = topicArr[i].trim();
            topicsSet.add(topic);
            Map<String, Object> kafkaParams = new HashMap<>(16);
            kafkaParams.putAll(streamingConfiguration.getKafkaProperties());
            kafkaParams.put("group.id", groupArr[i]);
            kps.put(topic, kafkaParams);
        }

        //rabbitmq 日志队列参数设置
        Set<String> queueSet = new HashSet<>(32);
        String[] queues = mqLogQueues.split(",");
        Map<String, Map<String, String>> qps = new HashMap<>(32);
        for (int i = 0; i < queues.length; i++) {
            String queue = queues[i].trim();
            queueSet.add(queue);
            Map<String, String> rabbitmqParams = new HashMap<>(32);
            rabbitmqParams.put("hosts", mqNodeList);
            rabbitmqParams.put("vHost", "/");
            rabbitmqParams.put("userName", mqUsername);
            rabbitmqParams.put("password", mqPassword);
            rabbitmqParams.put("ackType", "auto");
            rabbitmqParams.put("maxReceiveTime", args[4]);
            rabbitmqParams.put("maxMessagesPerPartition", args[5]);
            qps.put(queue, rabbitmqParams);
        }

        //设置hbase参数
        List<TableMapping> tableMappings = new ArrayList<>();
        if (useKafkaRecv) {
            topicsSet.forEach(topic -> AuditDAO.findTableMetaInfoByTopic(topic).ifPresent(tableMappings::add));
        } else if (useRabbitmqRecv) {
            queueSet.forEach(queue ->
                    AuditDAO.findTableMetaInfoByQueue(queue).ifPresent(t -> {
                        tableMappings.add(t);
                        qps.get(queue).put("levelParallelism", String.valueOf(t.getNumOfPartition()));
                    }));
        }

        Job[] jobs = new Job[tableMappings.size()];
        boolean[] autoSuffixArr = new boolean[tableMappings.size()];
        for (int i = 0; i < tableMappings.size(); i++) {
            autoSuffixArr[i] = tableMappings.get(i).getAutoAppendSuffix() == 1;
            jobs[i] = configJob(jsc, streamingConfiguration.getHbaseProperties());
        }
        RuleFastMatcherBroadcastWrapper.getInstance(jsc).setUpdateByFixRate(enableAutoBroadcast);
        RuleFastMatcherBroadcastWrapper.getInstance(jsc).setInterval(broadcastUpdateInterval);
        RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
        Broadcast<Boolean> saveErrorLogBc = jsc.broadcast(enableSaveErrorLog);

        Broadcast<RabbitMQConf> rabbitMQConfBroadcast = LazySingletonBroadcast.getOrCreate(jsc, RabbitMQConf.class, mqConf);
        Broadcast<Integer> alertLevelBc = LazySingletonBroadcast.getOrCreate(jsc, "EVENT_ALERT_LEVEL", alertLevel);
        //启动消息接收线程
        if (ENABLE_AUDIT) {
            RabbitMQConsumer auditMsgConsumer = new RabbitMQConsumer();
//            auditMsgConsumer.setHost(mqHost);
//            auditMsgConsumer.setPort(mqPort);
            mqConf.getNodes().forEach(auditMsgConsumer::addAddress);
            auditMsgConsumer.setQueue(mqNotifyQueue);
            auditMsgConsumer.setUserName(mqUsername);
            auditMsgConsumer.setPassword(mqPassword);
            auditMsgConsumer.setExchange(mqNotifyExchange);
            auditMsgConsumer.setXt(RabbitMQProducer.XT.FANOUT);
            auditMsgConsumer.addMsgListener(StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(jsc));
            auditMsgConsumer.start();
        }

        //初始化设备列表
        List<Tuple2<String, Integer>> allDevices = DeviceDAO.findAllDevice();
        Set<String> devSet = new HashSet<>();
        allDevices.forEach(t -> devSet.add(t._1()));
        DeviceSetBroadcastWrapper deviceSetBroadcastWrapper = DeviceSetBroadcastWrapper.getInstance(jsc, devSet);
        deviceSetBroadcastWrapper.setInterval(broadcastUpdateInterval);
        deviceSetBroadcastWrapper.setUpdateByFixRate(enableAutoBroadcast);
        //启动设备增删消息接收线程
        RabbitMQConsumer devMsgConsumer = new RabbitMQConsumer();
//        devMsgConsumer.setHost(mqHost);
//        devMsgConsumer.setPort(mqPort);
        mqConf.getNodes().forEach(devMsgConsumer::addAddress);
        devMsgConsumer.setQueue(mqNotifyQueue);
        devMsgConsumer.setUserName(mqUsername);
        devMsgConsumer.setPassword(mqPassword);
        devMsgConsumer.setExchange(mqDevExchange);
        devMsgConsumer.setXt(RabbitMQProducer.XT.FANOUT);
        devMsgConsumer.addMsgListener(message -> {
            System.out.println("Recv: " + message);
            DevNotify notify = JSON.parseObject(message, DevNotify.class);
            //设备添加标识
            String add = "Add";
            //设备删除标识
            String del = "Del";
            if (notify.getDevNums() != null) {
                if (add.equalsIgnoreCase(notify.getType())) {
                    for (String dev : notify.getDevNums()) {
                        //初始时假设设备状态为离线
                        deviceSetBroadcastWrapper.addDevice(dev);
                    }
                } else if (del.equalsIgnoreCase(notify.getType())) {
                    for (String dev : notify.getDevNums()) {
                        deviceSetBroadcastWrapper.removeDevice(dev);
                    }
                }
            }
        });
        devMsgConsumer.start();

        //对每个topic创建一个stream，以方便处理
        for (int i = 0; i < jobs.length; i++) {
            TableMapping tm = tableMappings.get(i);
            String topic = tm.getTopic();
            Job mainJob = jobs[i];
            String tableName = tableMappings.get(i).getTableName();
            boolean autoSuffix = autoSuffixArr[i];
            long initialSeqValue = StreamUtils.getInitialSeqForTable(tm, LocalDate.now(Tool.zoneId), autoSuffix);

            List<String> ts = new ArrayList<>();
            ts.add(topic);

            Broadcast<TableMapping> tableMappingBc = jsc.broadcast(tm);

            JavaInputDStream<ConsumerRecord<Object, Object>> kafkaStream = null;
            JavaDStream<String> dStream = null;

            if (useKafkaRecv) {
                 kafkaStream = KafkaUtils.createDirectStream(
                        jssc,
                        LocationStrategies.PreferConsistent(),
                        ConsumerStrategies.Subscribe(ts, kps.get(topic))
                );
                dStream = tm.isRepartition() ? kafkaStream.map(r -> r.value().toString()).repartition(tm.getNumOfPartition())
                        : kafkaStream.map(r -> r.value().toString());
            } else if (useRabbitmqRecv) {
                List<JavaRabbitMQDistributedKey> distributedKeys = new LinkedList<>();
                distributedKeys.add(new JavaRabbitMQDistributedKey(tm.getQueue(),
                        new ExchangeAndRouting(""),
                        qps.get(tm.getQueue())
                ));
                Function<QueueingConsumer.Delivery, String> messageHandler = (Function<QueueingConsumer.Delivery, String>) message -> new String(message.getBody());
                dStream = RabbitMQUtils.createJavaDistributedStream(jssc, String.class, distributedKeys, qps.get(tm.getQueue()), messageHandler);
            }

            //arr stream
            JavaDStream<JSONArray> arrStream = null;

            if (LogConstant.LogType.DESENS_TASK_LOG.equals(tm.getLogType())) {
                //脱敏系统数任务日志
                Broadcast<DesensSchemaDescV2> schemaDescBroadcast = LazySingletonBroadcast.getOrCreate(jsc, DesensSchemaDescV2.class, new DesensSchemaDescV2(tm));
                Broadcast<Boolean> enableAuditBc = LazySingletonBroadcast.getOrCreate(jsc, "ENABLE_AUDIT", ENABLE_AUDIT);

                //process stream
                JavaDStream<DesensSchemaDescV2.DensesLog> auditStream = dStream.transform(steam -> {
                    Broadcast<AuditRuleFastMatcher> bc = StreamUtils.RuleFastMatcherBroadcastWrapper.getInstance(jsc).updateAndGet();
                    return steam.map(log -> {
                        //删除日志消息上多余的引号
                        if (log.startsWith("\"")) {
                            log = log.substring(1, log.length() - 1);
                        }
                        DesensSchemaDescV2 desc = schemaDescBroadcast.getValue();
                        StringTokenizer tokenizer = new StringTokenizer(log, "&");
                        Map<String, String> pair = new HashMap<>(32);
                        while (tokenizer.hasMoreTokens()) {
                            String[] tmp = tokenizer.nextToken().split("=");
                            if (tmp.length == 2 && !tmp[1].isEmpty()) {
                                pair.put(tmp[0], tmp[1]);
                            }
                        }
                        JSONArray jsonArray = new JSONArray(64);
                        jsonArray.set(desc.logFieldIndex.TASK_TM, pair.get("task_time"));
                        jsonArray.set(desc.logFieldIndex.TASK_ID, pair.get("task_id"));
                        jsonArray.set(desc.logFieldIndex.TASK_NAME, pair.get("task_name"));
                        jsonArray.set(desc.logFieldIndex.TASK_TYPE, pair.get("task_type"));
                        jsonArray.set(desc.logFieldIndex.SRC_ADDR, pair.get("src_addr"));
                        jsonArray.set(desc.logFieldIndex.SRC_PORT, pair.get("src_port"));
                        jsonArray.set(desc.logFieldIndex.SRC_NAME, pair.get("src_name"));
                        jsonArray.set(desc.logFieldIndex.SRC_DB_NAME, pair.get("src_db_name"));
                        jsonArray.set(desc.logFieldIndex.SRC_TAB_NAME, pair.get("src_tab_name"));
                        jsonArray.set(desc.logFieldIndex.TAB_NUM_ROWS, pair.get("tab_num_rows"));
                        jsonArray.set(desc.logFieldIndex.DESE_DETAIL, pair.get("desc_detail"));
                        jsonArray.set(desc.logFieldIndex.DESE_NUM_ROW, pair.get("mask_line"));
                        jsonArray.set(desc.logFieldIndex.DESE_CONDT, pair.get("dese_condt"));
                        jsonArray.set(desc.logFieldIndex.DEST_ADDR, pair.get("dest_addr"));
                        jsonArray.set(desc.logFieldIndex.DEST_PORT, pair.get("dest_port"));
                        jsonArray.set(desc.logFieldIndex.DEST_DB_NAME, pair.get("dest_db_name"));
                        jsonArray.set(desc.logFieldIndex.DEST_TAB_NAME, pair.get("dest_tab_name"));
                        jsonArray.set(desc.logFieldIndex.DEST_NAME, pair.get("dest_name"));
                        jsonArray.set(desc.logFieldIndex.DEST_TYPE, pair.get("dest_db_type"));
                        jsonArray.set(desc.logFieldIndex.EXTE_1, pair.get("exte_1"));
                        jsonArray.set(desc.logFieldIndex.EXTE_2, pair.get("exte_2"));
                        jsonArray.set(desc.logFieldIndex.CLIENT_IP, pair.get("src_ip"));
                        jsonArray.set(desc.logFieldIndex.USR, pair.get("usr"));
                        jsonArray.set(desc.logFieldIndex.SRC_LEVEL, pair.get("log_level"));
                        jsonArray.set(desc.logFieldIndex.ACT_RESULT, pair.get("status"));
                        jsonArray.set(desc.logFieldIndex.SRC_TYPE, pair.get("src_db_type"));
                        jsonArray.add(pair.getOrDefault("rept_device_num", ""));
                        jsonArray.add(log);
                        return jsonArray;
                    }).filter(arr -> {
                        String src = (String) arr.remove(arr.size() - 1);
                        DesensSchemaDescV2 desc = schemaDescBroadcast.getValue();
                        //格式异常检查
                        try {
                            arr.set(desc.logFieldIndex.SRC_PORT, arr.getInteger(desc.logFieldIndex.SRC_PORT));
                            String numStr = arr.getString(desc.logFieldIndex.DESE_NUM_ROW);
                            if (null != numStr && !numStr.isEmpty()) {
                                arr.set(desc.logFieldIndex.DESE_NUM_ROW, arr.getInteger(desc.logFieldIndex.DESE_NUM_ROW));
                            } else {
                                arr.set(desc.logFieldIndex.DESE_NUM_ROW, 0);
                            }
                        } catch (Exception e) {
                            logger.info("日志数据格式异常，{}", arr.toJSONString());
                            e.printStackTrace();
                            if (saveErrorLogBc.value()) {
                                AuditDAO.saveErrorLog(LogConstant.LogType.DESENS_TASK_LOG, src);
                            }
                            return false;
                        }
                        return true;
                    }).map(m -> {
                        DesensSchemaDescV2 desc = schemaDescBroadcast.getValue();
                        ResourceAuditObj auditObj = bc.value().getResourceId(m.getString(desc.logFieldIndex.SRC_ADDR), m.getInteger(desc.logFieldIndex.SRC_PORT), ResourceAuditObj.DESENS_SRC);
                        String timeStr = m.getString(desc.logFieldIndex.TASK_TM);
                        LocalDateTime time = Tool.getTimeFromOrDefault(timeStr);
                        String srcDbName = m.getString(desc.logFieldIndex.SRC_DB_NAME);
                        String op;
                        boolean opType = (srcDbName == null || srcDbName.isEmpty());
                        if (opType) {
                            op = "读";
                        } else {
                            op = "查";
                        }

                        String devId = (String)m.remove(m.size()-1);
                        //初始化结果集
                        DesensSchemaDescV2.DensesLog log = desc.buildDensesLog(m);
                        //设置上报设备
                        log.setReptDeviceNum(devId);
                        //设置操作类型
                        log.setSrcOperType(op);
                        //设置操作结果
                        int num = m.getInteger(desc.logFieldIndex.DESE_NUM_ROW);
                        log.setActResult(num > 0 ? "success" : "failed");
                        //生成时间戳
                        long timestamp = getIdTimestamp(time);
                        String ip = m.getString(desc.logFieldIndex.SRC_ADDR);
                        AuditRuleMatchResult armr = null;

                        //确定脱敏类型
                        String desensType = "99999";
                        switch (log.getTaskType()) {
                            //数据库静态脱敏
                            case "11":
                            case "13":
                                if (auditObj != null) {
                                    String typeCode = auditObj.getResTypeCode();
                                    if (typeCode != null && !typeCode.isEmpty()) {
                                        desensType = "00" + typeCode;
                                    }
                                }
                                break;
                            //数据库动态脱敏
                            case "12":
                                if (auditObj != null) {
                                    String typeCode = auditObj.getResTypeCode();
                                    if (typeCode != null && !typeCode.isEmpty()) {
                                        desensType = "01" + typeCode;
                                    }
                                }
                                break;
                            //文件静态脱敏
                            case "21":
                            case "23":
                                desensType = "02999";
                                break;
                            //文件动态脱敏
                            case "22":
                                desensType = "03999";
                                break;
                            default:
                                break;
                        }

                        if (auditObj != null) {
                            /**
                             * 规则匹配
                             */
                            if (enableAuditBc.getValue()) {
                                armr = bc.getValue().match(auditObj.getResrcId(), ip, time, op);
                            }
                            log.setSrcResrcId(String.valueOf(auditObj.getResrcId()));
                            log.setSrcAuditObjId(String.valueOf(auditObj.getId()));
                        }
                        String id = desensType + timestamp;
                        log.setId(id);

                        int riskLevel = 0;
                        if (armr != null) {
                            //规则匹配成功
                            log.setSrcRuleId(String.valueOf(armr.getRuleId()));
                            log.setSrcLevel(Integer.toString(armr.getRiskLevel()));
                            riskLevel = armr.getRiskLevel();
                        } else {
                            //规则匹配失败
                            log.setSrcLevel("0");
                        }
                        return log;
                    });
                }).cache();

                saveDesensTask(auditStream, jsc, tableName, initialSeqValue, mainJob, rabbitMQConfBroadcast, tableMappingBc, schemaDescBroadcast, alertLevelBc, enableAuditBc);
            } else if (LogConstant.LogType.DESENS_OP_LOG.equals(tm.getLogType())) {
                //脱敏系统操作日志
                DesensOpSchemaDescV2 descV2 = new DesensOpSchemaDescV2(tm);
                Broadcast<DesensOpSchemaDescV2> schemaDescBroadcast = LazySingletonBroadcast.getOrCreate(jsc, DesensOpSchemaDescV2.class, descV2);
                JavaDStream<List<String>> cleanStream = dStream.transform(stream ->
                        stream.map(log -> {
                            //删除日志消息上多余的引号
                            if (log.startsWith("\"")) {
                                log = log.substring(1, log.length() - 1);
                            }
                            DesensOpSchemaDescV2 desc = schemaDescBroadcast.getValue();
                            StringTokenizer tokenizer = new StringTokenizer(log, "&");
                            Map<String, String> pair = new HashMap<>(16);
                            while (tokenizer.hasMoreTokens()) {
                                String[] tmp = tokenizer.nextToken().split("=");
                                if (tmp.length == 2 && !tmp[1].isEmpty()) {
                                    pair.put(tmp[0], tmp[1]);
                                }
                            }
                            JSONArray jsonArray = new JSONArray(16);
                            jsonArray.set(desc.logFieldIndex.OPER_TM, pair.get("date"));
                            jsonArray.set(desc.logFieldIndex.IP_ADDR, pair.get("ip"));
                            jsonArray.set(desc.logFieldIndex.USER_NAME, pair.get("user"));
                            jsonArray.set(desc.logFieldIndex.OPER_MODL, pair.get("module"));
                            jsonArray.set(desc.logFieldIndex.OPER_TYPE, pair.get("act"));
                            jsonArray.set(desc.logFieldIndex.LOG_TYPE, pair.get("log_type"));
                            jsonArray.set(desc.logFieldIndex.LOG_LEVEL, pair.get("log_level"));
//                            jsonArray.set(desc.logFieldIndex.OPER_STAT, pair.get("status"));
                            String operStat = pair.get("status");
                            if ("成功".equals(operStat)) {
                                jsonArray.set(desc.logFieldIndex.OPER_STAT, "success");
                            } else if ("失败".equals(operStat)) {
                                jsonArray.set(desc.logFieldIndex.OPER_STAT, "failed");
                            } else {
                                jsonArray.set(desc.logFieldIndex.OPER_STAT, "");
                            }
//                            return jsonArray;
//                        }).filter(arr -> {
//                            return true;
//                        }).map(jsonArray -> {
//                            DesensOpSchemaDescV2 desc = schemaDescBroadcast.getValue();
                            //初始化结果集
                            List<String> res = desc.buildLog(jsonArray);
                            //设置上报设备
                            res.set(desc.logFieldIndex.REPT_DEVICE_NUM, "");
                            res.set(desc.logFieldIndex.CREATE_TM, Tool.DEFAULT_TIME_FORMATTER.format(LocalDateTime.now(Tool.zoneId)));
                            //设置id
                            res.set(desc.logFieldIndex.ID, String.valueOf(getIdTimestamp(jsonArray.getString(desc.logFieldIndex.OPER_TM))));
                            return res;
                        })).cache();
                saveOpLog(cleanStream, jsc, tableName, initialSeqValue, mainJob, tableMappingBc, schemaDescBroadcast, descV2.logFieldIndex.ID);
            } else {
                arrStream = dStream.transform(stream -> {
                    Broadcast<Set<String>> devsBc = DeviceSetBroadcastWrapper.getInstance(jsc, devSet).updateAndGet();
                    return stream.map(record -> {
                        Map<String, Object> res = new HashMap<>(48);
                        try {
                            Map<String, Object> map = (Map<String, Object>) JSON.parse(record);
                            for (Map.Entry<String, Object> entry : map.entrySet()) {
                                res.put(entry.getKey().toLowerCase(), entry.getValue());
                            }
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                        res.put("__source__", record);
                        return res;
                    }).filter(m -> {
                        String logType = tableMappingBc.getValue().getLogType();
                        Object dev = m.get("devid");
                        boolean flag = logType.equals(String.valueOf(m.get("logtype")))
                                && dev != null && (devsBc.getValue().contains(dev) || "42".equals(logType));
                        if (flag) {
                            Object obj = m.get("datas");
                            if (obj instanceof JSONArray) {
                                JSONArray arr = (JSONArray) obj;
                                flag = arr.size() > 0 && arr.get(0) instanceof JSONArray;
                            } else {
                                flag = false;
                                if (saveErrorLogBc.value()) {
                                    AuditDAO.saveErrorLog(LogConstant.LogType.DESENS_TASK_LOG, (String) m.get("__source__"));
                                }
                            }
                        } else {
                            if (saveErrorLogBc.value()) {
                                AuditDAO.saveErrorLog(LogConstant.LogType.DESENS_TASK_LOG, (String) m.get("__source__"));
                            }
                        }
                        return flag;
                    }).flatMap(m -> {
                        String devId = (String) m.get("devid");
                        JSONArray arr = (JSONArray) m.get("datas");
                        arr.forEach(x -> ((JSONArray) x).add(devId));
                        return arr.iterator();
                    }).map(m -> (JSONArray) m);
                }).cache();
            }

            if (LogConstant.LogType.DB_AUDIT_OP_LOG.equals(tm.getLogType())) {
                //审计系统操作日志
                AuditOpSchemaDescV2 descV2 = new AuditOpSchemaDescV2(tm);
                Broadcast<AuditOpSchemaDescV2> schemaDescBroadcast = LazySingletonBroadcast.getOrCreate(jsc, AuditOpSchemaDescV2.class, descV2);
                JavaDStream<List<String>> cleanStream = arrStream.transform(stream ->
                        stream.filter(ja -> {
                            AuditOpSchemaDescV2 desc = schemaDescBroadcast.getValue();
                            String operStat = ja.getString(desc.logFieldIndex.OPER_STAT);
                            if (operStat != null &&
                                    ("success".equalsIgnoreCase(operStat) ||
                                            "failed".equalsIgnoreCase(operStat) || "start".equalsIgnoreCase(operStat))) {
                                return true;
                            }
                            return false;
                        }).map(jsonArray -> {
                            AuditOpSchemaDescV2 desc = schemaDescBroadcast.getValue();
                            String devId = (String)jsonArray.remove(jsonArray.size()-1);
                            //初始化结果集
                            List<String> res = desc.buildLog(jsonArray);
                            //设置上报设备
                            res.set(desc.logFieldIndex.REPT_DEVICE_NUM, devId);
                            res.set(desc.logFieldIndex.CREATE_TM, Tool.DEFAULT_TIME_FORMATTER.format(LocalDateTime.now(Tool.zoneId)));
                            //设置id
                            res.set(desc.logFieldIndex.ID, String.valueOf(getIdTimestamp(jsonArray.getString(desc.logFieldIndex.OPER_TM))));
                            return res;
                        }));
                saveOpLog(cleanStream, jsc, tableName, initialSeqValue, mainJob, tableMappingBc, schemaDescBroadcast, descV2.logFieldIndex.ID);
            } else if (LogConstant.LogType.SUPERVISE_USER_OPER_LOG.equals(tm.getLogType())) {
                //监管平台操作日志
                SuperviseOpSchemaDescV2 descV2 = new SuperviseOpSchemaDescV2(tm);
                Broadcast<SuperviseOpSchemaDescV2> schemaDescBroadcast = LazySingletonBroadcast.getOrCreate(jsc, SuperviseOpSchemaDescV2.class, descV2);
                JavaDStream<List<String>> cleanStream = arrStream.transform(stream ->
                        stream.filter(ja -> {
                            SuperviseOpSchemaDescV2 desc = schemaDescBroadcast.getValue();
                            String operStat = ja.getString(desc.logFieldIndex.OPER_STAT);
                            if (operStat != null &&
                                    ("success".equalsIgnoreCase(operStat) ||
                                            "failed".equalsIgnoreCase(operStat))) {
                                return true;
                            }
                            return false;
                        }).map(jsonArray -> {
                            SuperviseOpSchemaDescV2 desc = schemaDescBroadcast.getValue();
                            String devId = (String)jsonArray.remove(jsonArray.size()-1);
                            //初始化结果集
                            List<String> res = desc.buildLog(jsonArray);
                            //设置上报设备
                            res.set(desc.logFieldIndex.REPT_DEVICE_NUM, devId);
                            res.set(desc.logFieldIndex.CREATE_TM, Tool.DEFAULT_TIME_FORMATTER.format(LocalDateTime.now(Tool.zoneId)));
                            //设置id
                            res.set(desc.logFieldIndex.ID, String.valueOf(getIdTimestamp(jsonArray.getString(desc.logFieldIndex.OPER_TM))));
                            return res;
                        }));
                saveOpLog(cleanStream, jsc, tableName, initialSeqValue, mainJob, tableMappingBc, schemaDescBroadcast, descV2.logFieldIndex.ID);
            } else if (LogConstant.LogType.OPS_SECU_LOG.equals(tm.getLogType())
                    || LogConstant.LogType.OPS_ALERT_LOG.equals(tm.getLogType())) {
                //运维系统操作日志
                OpsSchemaDescV2 descV2 = new OpsSchemaDescV2(tm);
                Broadcast<OpsSchemaDescV2> schemaDescBroadcast = LazySingletonBroadcast.getOrCreate(jsc, OpsSchemaDescV2.class, descV2);
                JavaDStream<List<String>> cleanStream = arrStream.transform(stream ->
                        stream.map(jsonArray -> {
                            OpsSchemaDescV2 desc = schemaDescBroadcast.getValue();
                            String devId = (String)jsonArray.remove(jsonArray.size()-1);
                            //初始化结果集
                            List<String> res = desc.buildLog(jsonArray);
                            //设置上报设备
                            res.set(desc.logFieldIndex.REPT_DEVICE_NUM, devId);
                            LocalDateTime now = LocalDateTime.now(Tool.zoneId);
                            res.set(desc.logFieldIndex.CREATE_TM, Tool.DEFAULT_TIME_FORMATTER.format(now));
                            //设置id
                            res.set(desc.logFieldIndex.ID, String.valueOf(getIdTimestamp(jsonArray.getString(desc.logFieldIndex.START_TIME))));
                            return res;
                        }));
                saveOpLog(cleanStream, jsc, tableName, initialSeqValue, mainJob, tableMappingBc, schemaDescBroadcast, descV2.logFieldIndex.ID);
            } else {

            }

            //update kafka offset
            if (kafkaStream != null) {
                CanCommitOffsets offsets = (CanCommitOffsets)kafkaStream.inputDStream();
                kafkaStream.foreachRDD(javaRDD -> {
                    OffsetRange[] offsetRanges = ((HasOffsetRanges) javaRDD.rdd()).offsetRanges();
                    offsets.commitAsync(offsetRanges);
                });
            }
        }

        jssc.start();
        jssc.awaitTermination();
        jssc.close();
    }

    /**
     * Persists cleaned operation-log records to HBase through the MapReduce
     * output format configured on {@code mainJob}. Each record gets a unique
     * row key (its original id suffixed with a monotonically increasing
     * per-topic sequence number), and one secondary-index row is written for
     * every index table configured in the broadcast {@link TableMapping}.
     *
     * @param cleanStream         stream of field lists, one list per log record
     * @param jsc                 Spark context used for the accumulator and broadcast
     * @param tableName           unused here; kept for signature compatibility —
     *                            the table written comes from the broadcast mapping
     * @param initialSeqValue     starting value for the id-sequence accumulator
     * @param mainJob             Hadoop job carrying the HBase output configuration
     * @param tmBc                broadcast table mapping (main table, index tables)
     * @param schemaDescBroadcast broadcast schema descriptor; must be an {@link IndexSchemaDesc}
     * @param idCol               index of the id column inside each record list
     * @param <T>                 concrete schema-descriptor type
     */
    private static <T> void saveOpLog(JavaDStream<List<String>> cleanStream, JavaSparkContext jsc, String tableName,
                                      long initialSeqValue, Job mainJob, Broadcast<TableMapping> tmBc,
                                      Broadcast<T> schemaDescBroadcast,
                                      int idCol) {
        //write each micro-batch to HBase
        cleanStream.foreachRDD(listJavaRDD -> {
            LongAccumulator accumulator = StreamUtils.JavaIdGenerator.getInstance(jsc, tmBc.getValue().getTopic(), initialSeqValue);
            //accumulator values cannot be read inside RDD transformations, so the
            //current sequence start must be re-broadcast on every batch
            Broadcast<Long> seqStart = jsc.broadcast(accumulator.value());

            listJavaRDD.zipWithIndex().mapPartitionsToPair(iterator -> {
                List<Tuple2<ImmutableBytesWritable, Put>> res = new ArrayList<>(2048);
                TableMapping tm = tmBc.getValue();
                byte[] tableNameBytes = Bytes.toBytes(tm.getTableName());
                IndexSchemaDesc schemaDesc = (IndexSchemaDesc) schemaDescBroadcast.getValue();
                StringBuilder sb = new StringBuilder();
                List<IndexTable> indexTables = tm.getIndexTables();
                List<byte[]> indexTableBytes = new ArrayList<>();
                if (indexTables != null) {
                    indexTables.forEach(indexTable -> indexTableBytes.add(Bytes.toBytes(indexTable.getTableName())));
                }
                //hoisted loop invariants: these conversions were previously redone
                //for every record and every index table inside the loop below
                byte[] indexFamilyBytes = Bytes.toBytes("C1");
                byte[] indexRowkeyQualifierBytes = Bytes.toBytes("ROWKEY");
                long size = 0;
                while (iterator.hasNext()) {
                    Tuple2<List<String>, Long> tuple2 = iterator.next();
                    //globally unique sequence = broadcast start + index within RDD
                    long seq = seqStart.getValue() + tuple2._2();
                    List<String> list = tuple2._1();
                    String id = list.get(idCol) + seq;
                    list.set(idCol, id);
                    size++;
                    byte[] idBytes = Bytes.toBytes(id);
                    Put put = new Put(idBytes);
                    for (int j = 0; j < list.size(); j++) {
                        if (j != idCol) {
                            TableMapping.Column col = schemaDesc.getColumnByIndex(j);
                            String val = list.get(j);
                            if (val != null && !val.isEmpty()) {
                                put.addColumn(col.getFamilyBytes(), col.getColumnNameBytes(), Bytes.toBytes(val));
                            }
                        }
                    }
                    //marker column appended so the row is visible to Phoenix
                    put.addColumn(schemaDesc.getColumnByIndex(0).getFamilyBytes(), PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                    res.add(new Tuple2<>(new ImmutableBytesWritable(tableNameBytes), put));

                    if (indexTables != null) {
                        for (int j = 0; j < indexTables.size(); j++) {
                            IndexTable indexTable = indexTables.get(j);
                            byte[] indexTableByte = indexTableBytes.get(j);
                            //index rowkey = id prefix + indexed column value, padded to
                            //a fixed width, then the random suffix of the main-row id
                            sb.append(id.substring(0, indexTable.getSubkeyLen()))
                                    .append(list.get(indexTable.getIndexColumn()));
                            idAppend(sb, indexTable.getRowkeyLen() - indexTable.getRandomLen(), indexTable.getAppendChar());
                            sb.append(id.substring(id.length() - indexTable.getRandomLen()));
                            Put tmp = new Put(Bytes.toBytes(sb.toString()));
                            tmp.addColumn(indexFamilyBytes, indexRowkeyQualifierBytes, idBytes);
                            tmp.addColumn(indexFamilyBytes, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                            res.add(new Tuple2<>(new ImmutableBytesWritable(indexTableByte), tmp));
                            sb.setLength(0);
                        }
                    }
                }
                accumulator.add(size);
                return res.iterator();
            }).saveAsNewAPIHadoopDataset(mainJob.getConfiguration());

            //persist the advanced sequence value so restarts resume from it
            AuditDAO.updateHbaseRecordSeqValue(tmBc.getValue().getId(), accumulator.value());
        });
    }

    /**
     * Persists desensitization (data-masking) task logs to HBase and, for
     * records whose risk level reaches the configured threshold, collects
     * alert reasons and publishes them over RabbitMQ. Besides the configured
     * index tables, every record also gets a row in a special index table
     * keyed by resource ID and timestamp.
     *
     * @param stream                stream of parsed desensitization task logs
     * @param jsc                   Spark context used for the accumulator and broadcast
     * @param tableName             key used for the id-sequence accumulator and
     *                              for persisting the sequence value
     * @param initialSeqValue       starting value for the id-sequence accumulator
     * @param mainJob               Hadoop job carrying the HBase output configuration
     * @param rabbitMQConfBroadcast broadcast RabbitMQ connection settings for alerts
     * @param tmBc                  broadcast table mapping (main table, index tables)
     * @param schemaDescBroadcast   broadcast schema descriptor
     * @param alertLevelBc          broadcast minimum risk level that triggers an alert
     * @param enableAuditBc         broadcast switch enabling alert generation
     */
    private static void saveDesensTask(JavaDStream<DesensSchemaDescV2.DensesLog> stream, JavaSparkContext jsc, String tableName,
                                       long initialSeqValue, Job mainJob, Broadcast<RabbitMQConf> rabbitMQConfBroadcast,
                                       Broadcast<TableMapping> tmBc, Broadcast<DesensSchemaDescV2> schemaDescBroadcast, Broadcast<Integer> alertLevelBc, Broadcast<Boolean> enableAuditBc) {
        //write each micro-batch to HBase
        stream.foreachRDD(listJavaRDD -> {
            LongAccumulator accumulator = JavaIdGenerator.getInstance(jsc, tableName, initialSeqValue);
            //accumulator values cannot be read inside RDD transformations, so the
            //current sequence start must be re-broadcast on every batch
            Broadcast<Long> seqStart = jsc.broadcast(accumulator.value());
            listJavaRDD.zipWithIndex().mapPartitionsToPair(iterator -> {
                List<Tuple2<ImmutableBytesWritable, Put>> res = new ArrayList<>(2048);
                DesensSchemaDescV2 schemaDesc = schemaDescBroadcast.getValue();
                TableMapping tm = tmBc.getValue();
                byte[] tableNameBytes = Bytes.toBytes(tm.getTableName());
                StringBuilder sb = new StringBuilder();
                List<IndexTable> indexTables = tm.getIndexTables();
                List<byte[]> indexTableBytes = new ArrayList<>();
                if (indexTables != null) {
                    indexTables.forEach(indexTable -> indexTableBytes.add(Bytes.toBytes(indexTable.getTableName())));
                }
                long size = 0;
                List<AlertReason> alertReasons = new ArrayList<>(128);
                int alertLevel = alertLevelBc.value();
                //generate the insert time once per partition, truncated to seconds
                LocalDateTime now = LocalDateTime.now(Tool.zoneId);
                LocalDateTime createTime = now.minusNanos(now.getNano());
                String createTimeStr = Tool.DEFAULT_TIME_FORMATTER.format(createTime);
                long timestamp = getIdTimestamp(createTime);

                while (iterator.hasNext()) {
                    Tuple2<DesensSchemaDescV2.DensesLog, Long> tuple2 = iterator.next();
                    DesensSchemaDescV2.DensesLog log = tuple2._1();
                    //globally unique sequence = broadcast start + index within RDD
                    long seq = seqStart.getValue() + tuple2._2();
                    String id = log.getId() + seq;
                    log.setId(id);
                    size++;

                    log.setCreateTm(createTimeStr);
                    //hoisted: the id bytes were previously converted three times per record
                    byte[] idBytes = Bytes.toBytes(id);
                    Put put = new Put(idBytes);
                    for (int j = 0; j < schemaDesc.getFieldCount(); j++) {
                        if (j != schemaDesc.logFieldIndex.ID) {
                            TableMapping.Column col = schemaDesc.getColumnByIndex(j);
                            String val = log.get(j);
                            if (val != null && !val.isEmpty()) {
                                put.addColumn(col.getFamilyBytes(), col.getColumnNameBytes(), Bytes.toBytes(val));
                            }
                        }
                    }
                    //marker column appended so the row is visible to Phoenix
                    put.addColumn(schemaDesc.getColumnByIndex(0).getFamilyBytes(), PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                    res.add(new Tuple2<>(new ImmutableBytesWritable(tableNameBytes), put));

                    if (indexTables != null) {
                        for (int j = 0; j < indexTables.size(); j++) {
                            IndexTable indexTable = indexTables.get(j);
                            byte[] indexTableByte = indexTableBytes.get(j);
                            //index rowkey = id prefix + indexed column value, padded to
                            //a fixed width, then the random suffix of the main-row id
                            sb.append(id.substring(0, indexTable.getSubkeyLen()))
                                    .append(log.get(indexTable.getIndexColumn()));
                            idAppend(sb, indexTable.getRowkeyLen() - indexTable.getRandomLen(), indexTable.getAppendChar());
                            sb.append(id.substring(id.length() - indexTable.getRandomLen()));
                            Put tmp = new Put(Bytes.toBytes(sb.toString()));
                            tmp.addColumn(INDEX_COLUMN_FAMILY_BYTES, INDEX_COLUMN_ROWKEY_BYTES, idBytes);
                            tmp.addColumn(INDEX_COLUMN_FAMILY_BYTES, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                            res.add(new Tuple2<>(new ImmutableBytesWritable(indexTableByte), tmp));
                            sb.setLength(0);
                        }
                    }

                    //special index table on resource ID and risk level; a missing or
                    //empty resource id maps to the sentinel value "9999999999"
                    //(null guard added — every other field read here tolerates null)
                    String resId = log.getSrcResrcId();
                    sb.append(resId == null || resId.isEmpty() ? "9999999999" : resId)
                            .append(timestamp)
                            .append(id.substring(id.length() - 8));
                    Put tmp = new Put(Bytes.toBytes(sb.toString()));
                    tmp.addColumn(INDEX_COLUMN_FAMILY_BYTES, INDEX_COLUMN_ROWKEY_BYTES, idBytes);
                    tmp.addColumn(INDEX_COLUMN_FAMILY_BYTES, PHOENIX_APPEND_COL, PHOENIX_APPEND_COL_VALUE);
                    res.add(new Tuple2<>(new ImmutableBytesWritable(SDATA_DESE_TASK_DB_LOG_LEVEL), tmp));
                    sb.setLength(0);

                    //parseInt avoids the needless boxing of Integer.valueOf
                    int level = Integer.parseInt(log.getSrcLevel());
                    //collect an alert when the risk level reaches the threshold
                    if (enableAuditBc.getValue() && level >= alertLevel) {
                        AlertReason reason = new AlertReason();
                        reason.setTime(log.getCreateTm());
                        reason.setClientIp(log.getClientIp());
                        reason.setAuditObjId(log.getSrcAuditObjId());
                        reason.setClientUsr(log.getUsr());
                        reason.setDbName(log.getSrcDbName());
                        reason.setEventId(id);
                        reason.setLevel(log.getSrcLevel());
                        reason.setOpType(log.getSrcOperType());
                        reason.setResrcId(log.getSrcResrcId());
                        reason.setRowNum(log.getTabNumRows());
                        reason.setRuleId(log.getSrcRuleId());
                        reason.setTblName(log.getSrcTabName());
                        reason.setType("1");
                        reason.setLogType(id.substring(0, 5));
                        alertReasons.add(reason);
                    }
                }
                accumulator.add(size);

                if (!alertReasons.isEmpty()) {
                    sendAlert(alertReasons, rabbitMQConfBroadcast.value());
                }

                return res.iterator();
            }).saveAsNewAPIHadoopDataset(mainJob.getConfiguration());

            //persist the advanced sequence value so restarts resume from it
            AuditDAO.updateHbaseRecordSeqValue(tableName, accumulator.value());
        });
    }

    /**
     * Name of the secondary index table for the desensitization (data-masking)
     * task log table, keyed by resource ID and risk level.
     */
    private static final byte[] SDATA_DESE_TASK_DB_LOG_LEVEL = Bytes.toBytes("KMGA:SDATA_DESE_TASK_DB_LOG_LEVEL");

}
