package hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Created by spark on 16-10-19.
 */
public class MyAggregationClient {

    private static final TableName TABLE_NAME = TableName.valueOf("joe");

    private static final byte[] COLUMN_FAMILY = Bytes.toBytes("info");

    /**
     * Connects to a local HBase instance and runs server-side aggregations
     * (row count and max) over column {@code info:2} of table {@code joe}
     * via the aggregation coprocessor endpoint.
     *
     * <p>NOTE(review): assumes the AggregateImplementation coprocessor is
     * loaded on the target table and that the cell values of {@code info:2}
     * are 8-byte longs (required by {@link LongColumnInterpreter}) — confirm
     * against the cluster setup.
     *
     * @param args unused
     * @throws Throwable if the connection, RPC, or coprocessor call fails
     *         (propagated from the AggregationClient API)
     */
    public static void main(String[] args) throws Throwable {
        Configuration conf = new Configuration();
        // Single-valued property: use set(), not setStrings().
        conf.set("hbase.zookeeper.quorum", "localhost");
        conf.setInt("hbase.zookeeper.property.clientPort", 2182);
        // Raise the RPC timeout: a full-table aggregation scan can easily
        // exceed the default before the endpoint returns.
        conf.setLong("hbase.rpc.timeout", 600000);
        // Scanner caching: number of rows fetched per RPC round trip.
        conf.setLong("hbase.client.scanner.caching", 1000);

        Configuration hbaseConf = HBaseConfiguration.create(conf);

        Scan scan = new Scan();
        // Restrict the scan to the single column info:2. rowCount then counts
        // only rows containing that column, and max reads its long values.
        scan.addColumn(COLUMN_FAMILY, Bytes.toBytes("2"));

        // AggregationClient holds a cluster connection and is Closeable;
        // the original code leaked it — always close when done.
        AggregationClient aggregationClient = new AggregationClient(hbaseConf);
        try {
            long rowCount = aggregationClient.rowCount(TABLE_NAME,
                    new LongColumnInterpreter(), scan);

            System.out.println("row count is " + rowCount);

            System.out.println("max row is " + aggregationClient.max(TABLE_NAME,
                    new LongColumnInterpreter(), scan));
        } finally {
            aggregationClient.close();
        }
    }
}
