package com.cetc.sdp.kmga.cs.stream.nv;

import com.alibaba.fastjson.JSONArray;
import com.cetc.sdp.kmga.cs.common.TableMeta;
import com.cetc.sdp.kmga.cs.jdbc.AuditDAO;
import com.cetc.sdp.kmga.cs.stream.StreamUtils;
import com.cetc.sdp.kmga.cs.util.HTableFiled;
import com.cetc.sdp.kmga.cs.util.OpStreamJobConf;
import com.cetc.sdp.kmga.cs.util.Tool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.util.LongAccumulator;
import scala.Tuple2;

import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Ops (operations &amp; maintenance) system log streaming job.
 *
 * <p>Consumes pre-processed JSON log records from the wrapped {@link AbstractDStream},
 * converts each record into an HBase {@link Put} keyed by
 * {@code <START_TIME-derived timestamp><global sequence>}, and bulk-writes each batch to
 * the configured HBase table. The per-table sequence counter is tracked with a Spark
 * {@link LongAccumulator} and persisted through {@link AuditDAO} after every batch.
 *
 * @author DengQiang
 * @since 2018/3/30
 */
public class OpsLogStreaming implements StreamWork.StreamJob {

    /** Source stream wrapper supplying pre-processed JSON records. */
    private final AbstractDStream dStream;
    /** Per-job configuration (HBase table name, column layout, filter switches). */
    private final OpStreamJobConf conf;

    // Audit columns appended to every row, all stored in column family C1.
    private final static byte[] CREATE_TM = Bytes.toBytes("CREATE_TM");
    private final static byte[] REPT_DEVICE_NUM = Bytes.toBytes("REPT_DEVICE_NUM");
    private final static byte[] COL_FAMILY = Bytes.toBytes("C1");

    /** Logical column whose value becomes the time prefix of the row key. */
    private final static String START_TIME = "START_TIME";

    public OpsLogStreaming(OpStreamJobConf conf, AbstractDStream dStream) {
        this.conf = conf;
        this.dStream = dStream;
    }

    /**
     * Builds column lookup tables on the driver, then for each micro-batch turns every
     * JSON record into a {@link Put} and writes the batch to HBase via the Hadoop
     * {@code TableOutputFormat} job configuration.
     *
     * @param context streaming context wrapper providing Spark handles and the HBase job config
     */
    @Override
    public void processStreaming(StreamWork.Context context) {
        // Positional lookup tables built once on the driver. The plain arrays are
        // captured by the partition lambda below and serialized to the executors.
        Map<Integer, HTableFiled> fieldMap = conf.getLogFields();
        Map<String, Integer> colIndex = new HashMap<>(fieldMap.size());
        byte[][] fields = new byte[fieldMap.size()][];
        byte[][] colFamily = new byte[fieldMap.size()][];
        fieldMap.forEach((k, v) -> {
            fields[k] = Bytes.toBytes(v.getName());
            colFamily[k] = Bytes.toBytes(v.getColFamily());
            colIndex.put(v.getName(), k);
        });
        byte[] tableNameBytes = Bytes.toBytes(conf.getHTableName());

        // NOTE(review): if START_TIME is missing from the configured fields this stays -1
        // and data.getString(-1) below fails for every record — confirm the field is
        // always present in the job configuration.
        int startTimePosition = colIndex.getOrDefault(START_TIME, -1);
        TableMeta tableMeta = AuditDAO.findTableMetaByName(conf.getHTableName());
        long startSeq = StreamUtils.getInitialSeqForTable(tableMeta, LocalDate.now(Tool.zoneId), false);
        Job hbaseJob = context.configJob(conf.getHBaseJobConf());

        // Renamed from "dStream" so the local no longer shadows the field of the same name.
        JavaDStream<JSONArray> jsonDStream = this.dStream.getPreprocessedJsonDStream(context.getStreamingContext(), conf.getLogType(), conf.saveErrorLog(), context.getDeviceSetBroadcastWrapper(), conf.filterDev());
        jsonDStream.foreachRDD(rdd -> {
            LongAccumulator accumulator = StreamUtils.JavaIdGenerator.getInstance(context.getSparkContext(), conf.getHTableName(), startSeq);
            long seqStart = accumulator.value();
            // NOTE(review): the accumulator is advanced inside a transformation; Spark only
            // guarantees exactly-once accumulator updates within actions, so a task retry
            // may over-count the sequence — verify that gaps in the sequence are acceptable.
            rdd.zipWithIndex().mapPartitionsToPair(iterator -> {
                List<Tuple2<ImmutableBytesWritable, Put>> res = new ArrayList<>(2048);
                int size = 0;
                // One CREATE_TM timestamp per partition, not per record.
                byte[] now = Bytes.toBytes(Tool.DEFAULT_TIME_FORMATTER.format(LocalDateTime.now(Tool.zoneId)));
                while (iterator.hasNext()) {
                    Tuple2<JSONArray, Long> dataTuple = iterator.next();
                    JSONArray data = dataTuple._1;
                    // Row key = <timestamp derived from START_TIME><RDD-global index + batch start seq>.
                    String time = String.valueOf(StreamWork.getIdTimestamp(data.getString(startTimePosition)));
                    String id = time + (dataTuple._2() + seqStart);
                    Put put = new Put(Bytes.toBytes(id));
                    for (int i = 0; i < fields.length; i++) {
                        // A record may carry fewer columns than configured; skip missing/empty values.
                        String val = i < data.size() ? data.getString(i) : null;
                        if (val != null && !val.isEmpty()) {
                            put.addColumn(colFamily[i], fields[i], Bytes.toBytes(val));
                        }
                    }
                    put.addColumn(COL_FAMILY, CREATE_TM, now);
                    // Presumably the last JSON element is the reporting device number — confirm
                    // against the producer; an empty array would make data.size()-1 negative.
                    put.addColumn(COL_FAMILY, REPT_DEVICE_NUM, Bytes.toBytes(data.getString(data.size() - 1)));
                    res.add(new Tuple2<>(new ImmutableBytesWritable(tableNameBytes), put));
                    size++;
                }
                accumulator.add(size);
                return res.iterator();
            }).saveAsNewAPIHadoopDataset(hbaseJob.getConfiguration());
            // Persist the advanced sequence so a restarted job resumes from the right value.
            AuditDAO.updateHbaseRecordSeqValue(conf.getHTableName(), accumulator.value());
            this.dStream.afterBatchProcessed();
        });
    }

    /**
     * No-op: per-batch cleanup is performed inside the {@code foreachRDD} callback above
     * (see {@code afterBatchProcessed()}), so nothing remains to do here.
     */
    @Override
    public void afterProcess(StreamWork.Context context) {
    }
}
