package com.centnet.base.bigdata.hbase;

import com.inspinia.base.pagination.Pager;
import com.inspinia.base.util.TypeChange;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.*;

import static org.apache.hadoop.hbase.TableName.valueOf;
import static org.apache.hadoop.hbase.util.Bytes.toBytes;

/**
 * HBase utility helpers: table administration, single/batch CRUD, scans and paginated queries.
 * Created by veblen on 16/6/15.
 */
public class HBaseUtil {

    private static final Logger logger = LoggerFactory.getLogger(HBaseUtil.class);
    // Cluster configuration, built exactly once by the static initializer below.
    private static Configuration conf;
    // Single Connection shared by every method in this class.
    private static Connection conn;

    static {
        // Pull client tuning values from the project's HbaseConfig constants holder.
        String zookeeperHost = HbaseConfig.HBASE_ZOOKEEPER_QUORUM;
        String ipcPoolSize = HbaseConfig.HBASE_CLIENT_IPC_POOL_SIZE;
        String opTimeout = HbaseConfig.HBASE_CLIENT_OPERATION_TIMEOUT;
        String scannerCaching = HbaseConfig.HBASE_CLIENT_SCANNER_CACHING;
        String writeBuffer = HbaseConfig.HBASE_CLIENT_WRITE_BUFFER;
        conf = HBaseConfiguration.create();
        conf.set(HConstants.ZOOKEEPER_QUORUM, zookeeperHost);
        conf.set("hbase.client.ipc.pool.size", ipcPoolSize);
        conf.set("hbase.client.operation.timeout", opTimeout);
        conf.set("hbase.client.scanner.caching", scannerCaching);
        conf.set("hbase.client.write.buffer", writeBuffer);
        try {
            conn = ConnectionFactory.createConnection(conf);
        } catch (IOException e) {
            // Fail fast: without a connection every method in this class is unusable.
            // (Message "连接hbase失败" = "failed to connect to hbase"; kept as-is, it is runtime text.)
            throw new RuntimeException("连接hbase失败", e);
        }
    }

    /**
     * Creates a table with pre-split regions.
     *
     * @param tableName    name of the table to create
     * @param maxVersion   maximum versions kept per column (values &lt; 1 fall back to 1)
     * @param inMemory     when true the column families are flagged in-memory so the
     *                     RegionServer favours keeping their blocks cached for reads
     * @param splitKeys    region boundary row keys used to pre-split the table
     * @param columnFamily column family names
     * @return true when the table was created, false on failure
     */
    public static boolean createTable(String tableName, int maxVersion, boolean inMemory, byte[][] splitKeys, String... columnFamily) {
        // try-with-resources guarantees the Admin handle is released even when createTable throws
        // (the original leaked it on the exception path and only printStackTrace'd)
        try (Admin admin = conn.getAdmin()) {
            HTableDescriptor htd = getHTableDescriptor(tableName, maxVersion, inMemory, columnFamily);
            admin.createTable(htd, splitKeys);
        } catch (IOException e) {
            // Arrays.toString renders the varargs readably; '+ columnFamily' printed an array hashcode
            logger.error("hbase create table fail,param:[tablename:" + tableName + ",cf:" + Arrays.toString(columnFamily) + "]", e);
            return false;
        }
        return true;
    }

    /**
     * Builds the table descriptor shared by the createTable overloads.
     *
     * @param tableName    table to describe
     * @param maxVersion   maximum versions per column (values &lt; 1 fall back to 1)
     * @param inMemory     whether the column families should be flagged in-memory
     * @param columnFamily column family names
     * @return the populated descriptor
     * @throws IOException if registering the coprocessor fails
     */
    private static HTableDescriptor getHTableDescriptor(String tableName, int maxVersion, boolean inMemory, String... columnFamily) throws IOException {
        int versions = (maxVersion > 0) ? maxVersion : 1;
        HTableDescriptor descriptor = new HTableDescriptor(valueOf(tableName));
        for (String family : columnFamily) {
            HColumnDescriptor cf = new HColumnDescriptor(family);
            cf.setInMemory(inMemory);
            cf.setMaxVersions(versions);
            descriptor.addFamily(cf);
        }
        // register the aggregation coprocessor so server-side count/sum style queries work
        descriptor.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
        return descriptor;
    }

    /**
     * Creates a table (no pre-splitting).
     *
     * @param tableName    name of the table to create
     * @param maxVersion   maximum versions kept per column (values &lt; 1 fall back to 1)
     * @param inMemory     when true the column families are flagged in-memory so the
     *                     RegionServer favours keeping their blocks cached for reads
     * @param columnFamily column family names
     * @return true when the table was created, false on failure
     */
    public static boolean createTable(String tableName, int maxVersion, boolean inMemory, String... columnFamily) {
        // try-with-resources guarantees the Admin handle is released even when createTable throws
        // (the original leaked it on the exception path and only printStackTrace'd)
        try (Admin admin = conn.getAdmin()) {
            HTableDescriptor htd = getHTableDescriptor(tableName, maxVersion, inMemory, columnFamily);
            admin.createTable(htd);
        } catch (IOException e) {
            // Arrays.toString renders the varargs readably; '+ columnFamily' printed an array hashcode
            logger.error("hbase create table fail,param:[tablename:" + tableName + ",cf:" + Arrays.toString(columnFamily) + "]", e);
            return false;
        }
        return true;
    }

    /**
     * Obtains an HBase table handle. The caller is responsible for closing it
     * (see {@code closeTable}).
     *
     * @param tableName table name
     * @param autoFlush when false, puts are buffered client-side and only sent
     *                  once the write buffer fills (or on flush/close); when true
     *                  every put triggers an RPC. Auto-flush is on by default.
     * @return the table handle, or null when the handle could not be obtained
     */
    public static HTable getTable(String tableName, boolean autoFlush) {
        try {
            HTable table = (HTable) conn.getTable(valueOf(tableName));
            table.setAutoFlushTo(autoFlush);
            return table;
        } catch (IOException e) {
            // message typo fixed ("file" -> "fail") and the cause attached so failures are diagnosable
            logger.error("fail to get Table Object hbase,param[tableName:" + tableName + "]", e);
        }
        // NOTE(review): callers receive null on failure; several call sites in this class do not guard against it
        return null;
    }


    /**
     * Writes one row with the given columns through a BufferedMutator.
     * The mutator is flushed before it is closed, so the row is persisted
     * when this method returns normally.
     *
     * @param table        table name
     * @param rowKey       row key
     * @param hbaseColumns columns to write for this row
     * @throws IOException if the write fails
     */
    public static void put(String table, String rowKey, HBaseColumn... hbaseColumns) throws IOException {
        // try-with-resources closes the mutator even when mutate/flush throws
        try (BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf(table))) {
            Put row = new Put(toBytes(rowKey));
            for (HBaseColumn column : hbaseColumns) {
                byte[] value = getValue(column.getValue(), column.getValueType());
                row.addColumn(toBytes(column.getFamily()), toBytes(column.getQualifier()), value);
            }
            mutator.mutate(row);
            mutator.flush();
        }
    }

    /**
     * Serializes a value to HBase bytes according to its declared type.
     * Supported types: Integer, String, Long.
     *
     * @param value     the value to serialize
     * @param valueType the declared type of the value
     * @return the serialized bytes
     * @throws IllegalArgumentException for unsupported (or null) types
     */
    public static byte[] getValue(Object value, Class<?> valueType) {
        // constant-first equals avoids an NPE when valueType is null;
        // a null type now falls through to the IllegalArgumentException below
        if (Integer.class.equals(valueType)) {
            return toBytes(TypeChange.objectToInt(value));
        }
        if (String.class.equals(valueType)) {
            return toBytes(TypeChange.objectToString(value));
        }
        if (Long.class.equals(valueType)) {
            return toBytes(TypeChange.objectToLong(value));
        }
        // IllegalArgumentException is a RuntimeException, so existing catch sites still work
        throw new IllegalArgumentException("not support class:" + valueType);
    }


    /**
     * Batch insert: one Put per row, each carrying all of that row's columns.
     *
     * @param tableName  table name
     * @param rowColumns map of row key -&gt; columns to write for that row
     * @return the number of row entries in the input map
     */
    public static int batchPut(String tableName, Map<String, List<HBaseColumn>> rowColumns) {
        HTable table = getTable(tableName, false);
        List<Put> puts = new ArrayList<>(rowColumns.size());

        rowColumns.forEach((rowKey, columns) -> {
            if (columns == null || columns.isEmpty()) {
                return; // nothing to write for this row; an empty Put would be rejected by HBase
            }
            // build ONE Put per row key holding every column
            // (the original created a separate Put per column, inflating the batch)
            Put put = new Put(toBytes(rowKey));
            for (HBaseColumn column : columns) {
                put.addImmutable(toBytes(column.getFamily()), toBytes(column.getQualifier()),
                        getValue(column.getValue(), column.getValueType()));
            }
            puts.add(put);
        });
        try {
            table.put(puts);
        } catch (IOException e) {
            throw new RuntimeException(e);
        } finally {
            closeTable(table);
        }
        return rowColumns.size();
    }

    /**
     * Batch insert where each row writes exactly one column.
     *
     * @param tableName  table name
     * @param rowColumns map of row key -&gt; the single column to write for that row
     * @return the number of row entries in the input map
     */
    public static int batchPutOneColumn(String tableName, Map<String, HBaseColumn> rowColumns) {
        HTable table = getTable(tableName, false);
        List<Put> puts = new ArrayList<>(rowColumns.size());
        // iterate entries directly instead of keySet()+get() — saves one map lookup per row
        for (Map.Entry<String, HBaseColumn> entry : rowColumns.entrySet()) {
            HBaseColumn column = entry.getValue();
            Put put = new Put(toBytes(entry.getKey()));
            put.addImmutable(toBytes(column.getFamily()), toBytes(column.getQualifier()),
                    getValue(column.getValue(), column.getValueType()));
            puts.add(put);
        }
        try {
            table.put(puts);
        } catch (IOException e) {
            throw new RuntimeException(e);
        } finally {
            closeTable(table);
        }
        return rowColumns.size();
    }


    /**
     * Reads a single cell value.
     *
     * @param tableName table name
     * @param rowKey    row key
     * @param family    column family
     * @param qualifier column qualifier
     * @return the cell value bytes, or null when the row/cell is absent or the read failed
     */
    public static byte[] get(String tableName, String rowKey, String family, String qualifier) {
        // try-with-resources closes the table even on exception (the original leaked it on the error path)
        try (Table table = conn.getTable(valueOf(tableName))) {
            Get get = new Get(toBytes(rowKey));
            Result result = table.get(get);
            return result.getValue(toBytes(family), toBytes(qualifier));
        } catch (IOException e) {
            logger.error("fail to get row from hbase, param[tableName:" + tableName + ",roleKey:" + rowKey + "]", e);
        }
        return null;
    }

    /**
     * Fetches every column family of the given row.
     *
     * @param table  open table handle
     * @param rowKey row key
     * @return the row result (possibly empty)
     */
    public static Result get(Table table, String rowKey) throws IOException {
        return get(table, rowKey, null, null);
    }

    /**
     * Fetches a row, optionally restricted by a filter and/or a single column family.
     * Null filter/family means "no restriction".
     *
     * @param table  open table handle
     * @param rowKey row key
     * @param filter optional filter applied server-side, may be null
     * @param family optional column family restriction, may be null
     * @return the row result (possibly empty)
     */
    public static Result get(Table table, String rowKey, Filter filter, String family) throws IOException {
        Get request = new Get(toBytes(rowKey));
        if (filter != null) {
            request.setFilter(filter);
        }
        if (family != null) {
            request.addFamily(toBytes(family));
        }
        return table.get(request);
    }

    /**
     * Fetches a single column family of the given row.
     */
    public static Result get(Table table, String rowKey, String family) throws IOException {
        return get(table, rowKey, null, family);
    }

    /**
     * Fetches the given row with a filter applied.
     */
    public static Result get(Table table, String rowKey, Filter filter) throws IOException {
        return get(table, rowKey, filter, null);
    }


    /**
     * Checks whether the given row contains the specified column.
     *
     * @param table  open table handle
     * @param rowKey row key
     * @param family column family
     * @param column column qualifier
     * @return true when the column exists for that row
     * @throws IOException if the existence check fails
     */
    public static boolean isColumnExists(Table table, String rowKey, String family, String column) throws IOException {
        Get probe = new Get(toBytes(rowKey));
        probe.addColumn(toBytes(family), toBytes(column));
        return table.exists(probe);
    }

    /**
     * Checks whether a row with the given key exists.
     *
     * @param table  open table handle
     * @param rowKey row key
     * @return true when the row exists
     * @throws IOException if the existence check fails
     */
    public static boolean isRowExists(Table table, String rowKey) throws IOException {
        return table.exists(new Get(toBytes(rowKey)));
    }

    /**
     * Deletes an entire row.
     *
     * @param tableName table name
     * @param rowKey    row key
     * @return 1 on success, 0 on failure
     */
    public static int delete(String tableName, String rowKey) {
        // try-with-resources closes the table even on exception (the original leaked it on the error path)
        try (Table table = conn.getTable(valueOf(tableName))) {
            table.delete(new Delete(toBytes(rowKey)));
            return 1;
        } catch (IOException e) {
            logger.error("fail to delete row from hbase, param[tableName:" + tableName + ",roleKey:" + rowKey + "]", e);
        }
        return 0;
    }


    /**
     * Drops a table. HBase requires the table to be disabled before deletion,
     * hence the disableTable/deleteTable sequence.
     *
     * @param tableName name of the table to drop
     */
    public static void dropTable(String tableName) {
        TableName tb = valueOf(tableName);
        // try-with-resources closes the Admin even on exception (the original leaked it on error paths)
        try (Admin admin = conn.getAdmin()) {
            admin.disableTable(tb);
            admin.deleteTable(tb);
        } catch (IOException e) {
            // MasterNotRunningException and ZooKeeperConnectionException are IOException
            // subclasses with identical handling, so one catch arm replaces the three duplicates
            logger.error("fail to dropTable from hbase, param[tableName:" + tableName + "]", e);
        }
    }


    /**
     * Scans the row range [rowKey1, rowKey2) and collects the value of one cell per row.
     *
     * @param tableName table name
     * @param rowKey1   inclusive start row
     * @param rowKey2   exclusive stop row
     * @param family    column family to read
     * @param qualifier column qualifier to read
     * @return the collected values (entries may be null when a row lacks the cell),
     *         or null when the scan failed
     */
    public static List<String> scan(String tableName, String rowKey1, String rowKey2, String family, String qualifier) {
        List<String> results = null;
        // both the table and the scanner are closed even on exception
        // (the original never closed the scanner and leaked the table on the error path)
        try (Table table = conn.getTable(valueOf(tableName))) {
            Scan scan = new Scan(toBytes(rowKey1), toBytes(rowKey2));
            try (ResultScanner scanner = table.getScanner(scan)) {
                results = new ArrayList<>();
                for (Result result : scanner) {
                    results.add(Bytes.toString(result.getValue(toBytes(family), toBytes(qualifier))));
                }
            }
        } catch (IOException e) {
            logger.error("fail to get rows from hbase, param[tableName:" + tableName + ",roleKey1:" + rowKey1 + ",rowKey2:" + rowKey2 + "]", e);
        }
        return results;
    }

    /**
     * Scans a table with equality conditions and prints matching rows to stdout.
     * familyNames, qualifiers and values are parallel arrays: element i forms one
     * "family:qualifier == value" condition; all conditions must pass.
     *
     * @param tableName   table name
     * @param familyNames column families, parallel to qualifiers/values
     * @param qualifiers  column qualifiers
     * @param values      expected values
     */
    public static void queryByConditions(String tableName, String[] familyNames, String[] qualifiers, String[] values) {
        // table and scanner are closed even on exception (the original never closed the
        // table and leaked the scanner when iteration threw)
        try (Table table = conn.getTable(valueOf(tableName))) {
            List<Filter> filters = new ArrayList<>();
            if (familyNames != null && familyNames.length > 0) {
                for (int i = 0; i < familyNames.length; i++) {
                    filters.add(new SingleColumnValueFilter(toBytes(familyNames[i]),
                            toBytes(qualifiers[i]), CompareFilter.CompareOp.EQUAL, toBytes(values[i])));
                }
            }
            Scan scan = new Scan();
            scan.setFilter(new FilterList(filters));
            try (ResultScanner rs = table.getScanner(scan)) {
                // stdout output kept byte-for-byte: this method's observable result is what it prints
                for (Result r : rs) {
                    System.out.println("get results from hbase with rowkey:" + new String(r.getRow()));
                    for (Cell keyValue : r.rawCells()) {
                        System.out.println("列：" + new String(CellUtil.cloneFamily(keyValue)) + ":" + new String(CellUtil.cloneQualifier(keyValue)) + "====值:" + new String(CellUtil.cloneValue(keyValue)));
                    }
                }
            }
        } catch (Exception e) {
            logger.error("fail to queryByConditions from hbase, param[tableName:" + tableName + "]", e);
        }
    }

    /**
     * Registers a coprocessor on an existing table. The table must be disabled
     * for the descriptor change, then is re-enabled.
     *
     * @param tableName            table name
     * @param coprocessorClassName fully-qualified coprocessor class name
     */
    public static void addTableCoprocessor(String tableName, String coprocessorClassName) {
        TableName tb = valueOf(tableName);
        // try-with-resources closes the Admin even on exception (the original never closed it)
        try (Admin admin = conn.getAdmin()) {
            admin.disableTable(tb);
            HTableDescriptor htd = admin.getTableDescriptor(tb);
            htd.addCoprocessor(coprocessorClassName);
            admin.modifyTable(tb, htd);
            admin.enableTable(tb);
        } catch (IOException e) {
            // NOTE(review): if modifyTable fails the table is left disabled — consider re-enabling here
            logger.error("fail to addTableCoprocessor from hbase, param[tableName:" + tableName + "]", e);
        }
    }


    /**
     * Builds pre-split region boundaries from a sample of row keys:
     * 1. sort the sampled keys ascending (NOTE: sorts the caller's list in place);
     * 2. pick every partition-th key so the samples are spread evenly over slipRegin regions;
     * 3. the result is suitable for HBaseAdmin.createTable(descriptor, splitKeys).
     *
     * @param preDataKeys sampled row keys (must be non-empty; sorted in place)
     * @param slipRegin   number of pre-split regions
     * @return one boundary key per region
     */
    public static byte[][] getPreSlipTableRowKeys(List<String> preDataKeys, int slipRegin) {
        Collections.sort(preDataKeys);
        int size = preDataKeys.size();
        // how many sampled keys fall into each region (ceiling division)
        int partition = (int) Math.ceil((double) size / slipRegin);
        byte[][] splitKeys = new byte[slipRegin][];
        for (int i = 0; i < slipRegin; i++) {
            // clamp to the last sample instead of catching IndexOutOfBoundsException —
            // same result as the original without using exceptions for control flow
            int index = Math.min(i * partition, size - 1);
            splitKeys[i] = toBytes(preDataKeys.get(index));
        }
        return splitKeys;
    }

    /**
     * Builds a Scan over [startRow, endRow); a null bound leaves that side open.
     * Caching is tuned for the paging queries in this class.
     */
    private static Scan getScan(byte[] startRow, byte[] endRow) {
        Scan scan = new Scan();
        scan.setCaching(1000);     // rows fetched per RPC round-trip
        scan.setCacheBlocks(true); // let the server use its block cache
        if (startRow != null) {
            scan.setStartRow(startRow);
        }
        if (endRow != null) {
            scan.setStopRow(endRow);
        }
        return scan;
    }

    /**
     * Paginated query: a first rowkey-only scan counts rows and collects the row keys
     * for the requested page, then a multi-get fetches the full rows for that page.
     *
     * @param tableName       table name
     * @param startRow        scan start row (inclusive)
     * @param endRow          scan stop row (exclusive)
     * @param pageNo          page number, 1-based (null/0 defaults to 1)
     * @param pageSize        page size (null/0 defaults to 100)
     * @param row2ObjCallBack converts a fetched row into a domain object
     * @param filters         additional filters, all of which must pass
     * @return the populated page (empty on failure)
     */
    public static Pager scanByPage(String tableName, String startRow, String endRow, Integer pageNo, Integer pageSize, Row2ObjCallBack row2ObjCallBack, Filter... filters) {
        Pager page = new Pager<>();
        HTable table = null;
        List<byte[]> rowKeyList = new LinkedList<>();
        try {
            // normalise paging input
            if (pageNo == null || pageNo == 0) {
                pageNo = 1;
            }
            if (pageSize == null || pageSize == 0) {
                pageSize = 100;
            }
            // inclusive index range [firstCount, maxCount] of the rows belonging to this page
            int firstCount = (pageNo - 1) * pageSize;
            int maxCount = firstCount + pageSize - 1;
            table = getTable(tableName, false);
            Scan scan = getScan(Bytes.toBytes(startRow), Bytes.toBytes(endRow));

            // Combine FirstKeyOnlyFilter (rowkey-only scan) with the caller's filters.
            // The original called scan.setFilter() once per filter, and each call REPLACED
            // the previous one — so only the last filter was ever applied.
            FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
            filterList.addFilter(new FirstKeyOnlyFilter());
            if (filters != null) {
                for (Filter filter : filters) {
                    filterList.addFilter(filter);
                }
            }
            scan.setFilter(filterList);
            // only the newest version of each cell
            scan.setMaxVersions(1);

            int total = 0;
            // close the scanner deterministically; the original never closed it
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result rs : scanner) {
                    if (total >= firstCount && total <= maxCount) {
                        rowKeyList.add(rs.getRow());
                    }
                    total++; // keep counting past the page to obtain the total row count
                }
            }
            // second pass: fetch the full rows for the collected keys
            List<Get> getList = getGetList(rowKeyList);
            Result[] rss = table.get(getList);
            List<Object> list = new ArrayList<>(rowKeyList.size());
            for (Result rs : rss) {
                list.add(resultHandle(rs, row2ObjCallBack));
            }
            page.setPageNo(pageNo);
            page.setPageSize(pageSize);
            page.setTotalCount(total);
            page.setTotalPage(getTotalPage(pageSize, total));
            page.setList(list);
        } catch (Exception e) {
            logger.error("fail to scanByPage from hbase, param[tableName:" + tableName + "]", e);
        } finally {
            closeTable(table);
        }
        return page;
    }

    /**
     * Closes a table handle, tolerating null and swallowing (but logging) close failures.
     *
     * @param table the handle to close; may be null
     */
    public static void closeTable(HTable table) {
        if (table != null) {
            try {
                table.close();
            } catch (Exception e) {
                logger.error("close hbase table" + table.getName() + "fail!!!!!!", e);
            }
        }
    }

    /**
     * Converts one scan/get result into a domain object via the callback.
     * The row key and timestamp are taken from the first cell; all cells
     * contribute qualifier -&gt; value entries to the column map.
     *
     * @param rs              the fetched row
     * @param row2ObjCallBack row-to-object conversion callback
     * @return whatever the callback produces for this row
     */
    private static Object resultHandle(Result rs, Row2ObjCallBack row2ObjCallBack) {
        // single initialization — the original created one HashMap and immediately
        // overwrote it with a second (dead store)
        Map<String, String> columnMap = new HashMap<>();
        String rowKey = null;
        long timeStamp = 0;
        boolean isFirst = true;
        for (Cell cell : rs.rawCells()) {
            if (isFirst) {
                rowKey = Bytes.toString(CellUtil.cloneRow(cell));
                timeStamp = cell.getTimestamp();
                isFirst = false;
            }
            columnMap.put(Bytes.toString(CellUtil.cloneQualifier(cell)),
                    Bytes.toString(CellUtil.cloneValue(cell)));
        }
        return row2ObjCallBack.row2Obj(rowKey, timeStamp, columnMap);
    }

    /**
     * Computes the total number of pages for a row count (ceiling of totalCount / pageSize).
     *
     * @param pageSize   rows per page (assumed positive by callers)
     * @param totalCount total number of rows
     * @return the page count
     */
    private static int getTotalPage(int pageSize, int totalCount) {
        int fullPages = totalCount / pageSize;
        return (totalCount % pageSize == 0) ? fullPages : fullPages + 1;
    }

    /**
     * Wraps each row key in a Get, preserving order.
     *
     * @param rowKeyList row keys to fetch
     * @return one Get per row key
     */
    public static List<Get> getGetList(List<byte[]> rowKeyList) {
        List<Get> gets = new ArrayList<>(rowKeyList.size());
        for (byte[] rowKey : rowKeyList) {
            gets.add(new Get(rowKey));
        }
        return gets;
    }

    /**
     * Checks whether a table exists.
     *
     * @param tableName table name
     * @return true when the table exists
     */
    public static boolean isTableExists(String tableName) {
        TableName tb = TableName.valueOf(tableName);
        // try-with-resources closes the Admin handle (the original never closed it)
        try (Admin admin = conn.getAdmin()) {
            return admin.tableExists(tb);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Fetches the rows for the given keys and converts each via the callback.
     *
     * @param tableName       table name
     * @param rowKeys         row keys to fetch; null yields null
     * @param row2ObjCallBack row-to-object conversion callback
     * @return the converted rows, or null when rowKeys is null
     */
    public static List getByRowKeys(String tableName, List<byte[]> rowKeys, Row2ObjCallBack row2ObjCallBack) {
        if (rowKeys == null) {
            return null;
        }
        HTable table = getTable(tableName, false);
        try {
            // multi-get built from the key list, then converted row by row
            Result[] rows = table.get(getGetList(rowKeys));
            List converted = new ArrayList(rowKeys.size());
            for (Result row : rows) {
                converted.add(resultHandle(row, row2ObjCallBack));
            }
            return converted;
        } catch (IOException e) {
            throw new RuntimeException(e);
        } finally {
            closeTable(table);
        }
    }
}
