package com.gy.hadoop.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;

/*
Sample input data (rowkey <TAB> name <TAB> age):
0007	zhangsan	18
0008	lisi	25
0009	wangwu	20
 */

/**
 * Step 2: load the previously generated HFile files into HBase.
 */
public class HBaseLoadData {

    /**
     * Bulk-loads HFiles from HDFS into the HBase table {@code t2}.
     *
     * Two ways to perform the load:
     *  1. Programmatically, as done below with {@link LoadIncrementalHFiles}.
     *  2. From the command line:
     *     yarn jar $HBASE_HOME/lib/hbase-server-*.jar completebulkload /data/hbase/table/output t2
     *
     * @param args command-line arguments (unused)
     * @throws Exception if connecting to the cluster or performing the bulk load fails
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // NOTE(review): "centos102:2181" appears twice in the quorum — the first
        // entry is probably meant to be "centos101:2181"; confirm against the
        // actual ZooKeeper ensemble before changing it.
        conf.set("hbase.zookeeper.quorum", "centos102:2181,centos102:2181,centos103:2181");

        TableName tableName = TableName.valueOf("t2");

        // try-with-resources guarantees the connection, admin handle, table and
        // region locator are closed even if doBulkLoad throws (the original
        // version leaked all of them).
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tableName);
             RegionLocator locator = conn.getRegionLocator(tableName)) {

            LoadIncrementalHFiles load = new LoadIncrementalHFiles(conf);

            // Moves the HFiles under the output directory into the regions of t2.
            load.doBulkLoad(
                    new Path("hdfs://centos102:8020/data/hbase/table/output"),
                    admin,
                    table,
                    locator);
        }
    }
}
