package com.doit.day01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.util.StopWatch;

import java.io.IOException;

/**
 * Bulk-loads pre-generated HFiles from HDFS into the HBase table {@code test:baoxian}.
 *
 * <p>Expects the HFiles to already exist under {@code hdfs://linux01:8020/output1/}
 * (typically produced by an HFileOutputFormat2 MapReduce job) and the target table
 * to exist with matching column families.
 */
public class LoadHFile {
    /**
     * Entry point: connects to the HBase cluster via ZooKeeper and performs the bulk load.
     *
     * @param args unused
     * @throws IOException if the connection, table lookup, or bulk load fails
     */
    public static void main(String[] args) throws IOException {
        // Identity used for HDFS/HBase access when no Kerberos security is configured.
        System.setProperty("HADOOP_USER_NAME", "root");

        // Create the HBase client configuration; only the ZooKeeper quorum is required.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "linux01:2181,linux02:2181,linux03:2181");

        TableName tableName = TableName.valueOf("test:baoxian");

        // try-with-resources: Connection, Table, Admin and RegionLocator are all
        // AutoCloseable and were previously leaked (never closed), which keeps
        // ZooKeeper sessions and client threads alive after main() returns.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tableName);
             Admin admin = conn.getAdmin();
             RegionLocator regionLocator = conn.getRegionLocator(tableName)) {

            LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
            // Moves/links the HFiles under the given directory into the table's regions.
            loader.doBulkLoad(new Path("hdfs://linux01:8020/output1/"), admin, table, regionLocator);
        }
    }
}
