package com.shujia.advance;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class HBaseBulkLoadingDemo {

    /**
     * MapReduce driver that writes HFiles with {@link HFileOutputFormat2} and then
     * bulk-loads them into an existing HBase table.
     *
     * <p>Usage: {@code HBaseBulkLoadingDemo <inputPath> <hfileOutputPath> <tableName>}
     *
     * @param args args[0] = HDFS input path, args[1] = HFile output path,
     *             args[2] = target HBase table name
     * @throws Exception if job configuration, execution, or the bulk load fails
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 3) {
            System.err.println("Usage: HBaseBulkLoadingDemo <inputPath> <outputPath> <tableName>");
            System.exit(2);
        }

        // The bulk-loading cluster is the same as the Hadoop cluster, so either a
        // Hadoop Configuration or an HBase configuration object works here.
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum the HBase client connects through.
        conf.set("hbase.zookeeper.quorum", "hadoop102:2181,hadoop103:2181,hadoop104:2181");

        // Create and name the job, and set the jar-by-class for cluster submission.
        Job job = Job.getInstance(conf);
        job.setJobName("Hbase bulk loading table dianxin_bulk");
        job.setJarByClass(HBaseBulkLoadingDemo.class);

        job.setMapperClass(MyBulkLoadMapper.class);

        // Map output: row key -> single cell.
        // No explicit reducer / partitioner / output format is set here:
        // configureIncrementalLoad() below installs the total-order partitioner,
        // the KeyValue sort reducer and HFileOutputFormat2 itself, so setting
        // them manually (as the original code did) was redundant and overwritten.
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(KeyValue.class);

        // Input data and HFile output location.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        TableName table = TableName.valueOf(args[2]);

        // try-with-resources: the original leaked the connection, admin, table and
        // region locator on every path (success, failure, or exception).
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table dianxin_bulk = conn.getTable(table);
             RegionLocator regionLocator = conn.getRegionLocator(table)) {

            String tableName = Bytes.toString(dianxin_bulk.getName().getName());

            // Configure the job to emit HFiles aligned with the table's current
            // region boundaries (must run before job submission).
            HFileOutputFormat2.configureIncrementalLoad(job, dianxin_bulk, regionLocator);

            // Submit the job and wait; true = job succeeded.
            boolean b = job.waitForCompletion(true);

            if (b) {
                System.out.println("=====================HFile文件生成成功！！开始与Hbase中"+tableName+"表建立映射关系==================================");

                // Move the generated HFiles into the table's regions.
                // NOTE(review): LoadIncrementalHFiles is deprecated in HBase 2.x in
                // favor of BulkLoadHFiles — kept because it matches the imports this
                // file already uses.
                LoadIncrementalHFiles loadIncrementalHFiles = new LoadIncrementalHFiles(conf);
                loadIncrementalHFiles.doBulkLoad(new Path(args[1]), admin, dianxin_bulk, regionLocator);
            } else {
                System.out.println("=====================HFile文件生成失败！！！==================================");
                // Propagate the failure to the caller/scheduler via the exit code.
                System.exit(1);
            }
        }
    }
}