package com.gy.hadoop.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/*
Sample tab-separated input (rowkey <TAB> name <TAB> age):
0007	zhangsan	18
0008	lisi	25
0009	wangwu	20
 */

/**
 * Step 1: a map-only MapReduce job that converts the input text file into
 * HFiles suitable for HBase bulk loading.
 * <p>
 * Prerequisites (run before the job):
 * <pre>
 * create 't2','f1'
 * hdfs dfs -mkdir -p /data/hbase/table/input/
 * hdfs dfs -put input_user.txt /data/hbase/table/input/
 * </pre>
 */
public class HBaseBulkLoad extends Configured implements Tool {

    /** Target HBase table; must exist with family {@code f1} before running. */
    private static final String TABLE_NAME = "t2";
    private static final byte[] FAMILY = "f1".getBytes(StandardCharsets.UTF_8);

    /**
     * Configures and runs the HFile-generating job.
     *
     * @param args {@code args[0]} = HDFS input path, {@code args[1]} = HDFS output dir
     * @return 0 on success, 1 on job failure
     * @throws Exception on HDFS/HBase connection or job-submission errors
     */
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(super.getConf());
        job.setJarByClass(HBaseBulkLoad.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));

        job.setMapperClass(HBaseBulkLoadMapper.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);

        // configureIncrementalLoad wires up the total-order partitioner and reducer
        // so the emitted HFiles line up with the table's current region boundaries.
        // Connection/Table were previously leaked; try-with-resources closes them
        // once the job has been configured (they are not needed during execution).
        TableName tableName = TableName.valueOf(TABLE_NAME);
        try (Connection connection = ConnectionFactory.createConnection(super.getConf());
             Table table = connection.getTable(tableName)) {
            HFileOutputFormat2.configureIncrementalLoad(job, table,
                    connection.getRegionLocator(tableName));
        }

        job.setOutputFormatClass(HFileOutputFormat2.class);

        // Remove a stale output directory so re-runs do not fail with
        // FileAlreadyExistsException.
        Path outputPath = new Path(args[1]);
        FileSystem fs = FileSystem.get(job.getConfiguration());
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }
        HFileOutputFormat2.setOutputPath(job, outputPath);

        return job.waitForCompletion(true) ? 0 : 1;
    }


    public static void main(String[] args) throws Exception {
        // Fall back to the demo paths only when no arguments were supplied;
        // previously the command-line arguments were unconditionally discarded.
        if (args == null || args.length < 2) {
            args = new String[]{
                    "/data/hbase/table/input/input_user.txt",
                    "/data/hbase/table/output",
            };
        }

        Configuration conf = HBaseConfiguration.create();
        // NOTE(review): "centos102" is listed twice — likely one of them should be
        // another host (e.g. centos101); confirm against the cluster topology.
        conf.set("hbase.zookeeper.quorum", "centos102:2181,centos102:2181,centos103:2181");

        conf.set("fs.defaultFS", "hdfs://centos102:8020");

        conf.set("mapreduce.job.name", HBaseBulkLoad.class.getSimpleName());

        int runCode = ToolRunner.run(conf, new HBaseBulkLoad(), args);
        System.exit(runCode);
    }


    /**
     * Maps one tab-separated line ({@code rowkey\tname\tage}) to a
     * (rowkey, {@link Put}) pair consumed by {@link HFileOutputFormat2}.
     * Malformed lines (field count != 3) are skipped silently.
     */
    static class HBaseBulkLoadMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] fields = value.toString().split("\t");
            if (fields.length != 3) {
                return;
            }

            // Explicit UTF-8: the no-arg String.getBytes() uses the platform
            // charset, which would produce different rowkeys across JVMs.
            byte[] rowKey = fields[0].getBytes(StandardCharsets.UTF_8);

            Put put = new Put(rowKey);
            put.addColumn(FAMILY, "name".getBytes(StandardCharsets.UTF_8),
                    fields[1].getBytes(StandardCharsets.UTF_8));
            put.addColumn(FAMILY, "age".getBytes(StandardCharsets.UTF_8),
                    fields[2].getBytes(StandardCharsets.UTF_8));

            context.write(new ImmutableBytesWritable(rowKey), put);
        }

    }
}
