package com.cetcs.kmga.hbase.index;


import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


import java.io.IOException;
import java.util.Iterator;
import java.util.List;


/**
 * 数据库的处理器
 *
 * @author xutao
 * @version V1.0 创建时间：2017-09-12 11:44
 *          Copyright 2017 by CETC
 */
public class KmgaHbaseCoprocessor extends BaseRegionObserver {
    private static final Logger LOGGER = LoggerFactory.getLogger(KmgaHbaseCoprocessor.class);

    /** Column family / qualifier watched on the source table and written on the index table. */
    private static final byte[] USR = Bytes.toBytes("USR");

    /** Secondary-index table: row = cell value, qualifier = source row key, value = timestamp. */
    private static final TableName INDEX_TABLE =
            TableName.valueOf("KMGA:SDATA_AUDIT_DB_LOG_TEST_201709_USR");

    /**
     * Before a {@link Put} is applied to the region, mirror every USR:USR cell into the
     * inverted-index table so rows can later be looked up by value.
     *
     * @param e          the coprocessor environment for the region receiving the put
     * @param put        the incoming mutation being intercepted
     * @param edit       the WAL edit associated with the mutation (not used here)
     * @param durability the durability requested for the mutation (not used here)
     * @throws IOException if writing to the index table fails; the original put is then aborted
     */
    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit, Durability durability) throws IOException {
        // NOTE(review): if this coprocessor is also deployed on the index table itself,
        // this prePut would re-enter for index writes — confirm it is attached only to
        // the source table.
        // try-with-resources: the original leaked the Table when put.get/table.put threw.
        try (Table table = HBasePool.getInstance().getConnection().getTable(INDEX_TABLE)) {
            for (Cell tmp : put.get(USR, USR)) {
                LOGGER.info("开始循环插入数据");
                // BUGFIX: getFamilyArray()/getValueArray()/getRowArray() expose the Cell's
                // entire backing buffer, not just this cell's bytes. The offset/length
                // accessors must be honoured, otherwise the logged strings, the index row
                // key, and the index qualifier all contain garbage surrounding bytes.
                String fam = Bytes.toString(tmp.getFamilyArray(), tmp.getFamilyOffset(), tmp.getFamilyLength());
                String val = Bytes.toString(tmp.getValueArray(), tmp.getValueOffset(), tmp.getValueLength());
                LOGGER.info("列簇：{}====值：{}", fam, val);
                // Copy out the exact value / row-key slices to use as index coordinates.
                byte[] cellValue = Bytes.copy(tmp.getValueArray(), tmp.getValueOffset(), tmp.getValueLength());
                byte[] sourceRow = Bytes.copy(tmp.getRowArray(), tmp.getRowOffset(), tmp.getRowLength());
                Put indexPut = new Put(cellValue);
                indexPut.addColumn(USR, sourceRow, Bytes.toBytes(System.currentTimeMillis()));
                table.put(indexPut);
            }
        }
    }
}
