package com.caul.demo.hbase.bulkload;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * YARN client that submits this MapReduce program (jar) to the YARN server,
 * which distributes the jar to all NodeManagers for execution.
 * Launching with "java -cp XXX.jar MainClass" is discouraged; prefer
 * "hadoop jar XXX.jar MainClass [reduceTasks] [outputDir] [inputFile/inputPath...]"
 * (it wires up the Hadoop dependency classpath automatically).
 * Add "-DHADOOP_USER_NAME=root" to impersonate the root user.
 * Created by sdliang on 2018/3/30.
 */
public class ImportSubmitter {

  /**
   * Submits the HFile-generating MapReduce job to YARN and, on success,
   * bulk-loads the produced HFiles into the HBase table {@code test:t_org}.
   *
   * @param args [0] reduceTasks (parsed for CLI compatibility only — the actual
   *             reducer count is set by
   *             {@link HFileOutputFormat2#configureIncrementalLoad}, one per
   *             region), [1] HDFS output directory for the HFiles,
   *             [2..] one or more input files/directories.
   * @throws Exception on any job-submission, HBase, or bulk-load failure
   */
  public static void main(String[] args) throws Exception {
    if (args.length < 3) {
      System.err.println(
          "Usage: ImportSubmitter <reduceTasks> <outputDir> <inputFile/inputPath>...");
      System.exit(2);
    }

    // Kept for backward-compatible CLI; unused because configureIncrementalLoad
    // below fixes the reducer count to the table's region count.
    int reduceTasks = Integer.parseInt(args[0]);

    Path destPath = new Path(args[1]);

    Path[] srcPaths = new Path[args.length - 2];
    for (int i = 2; i < args.length; i++) {
      srcPaths[i - 2] = new Path(args[i]);
    }

    Configuration conf = new Configuration();
    // Load site-specific overrides.
    conf.addResource("custom.xml");

    // Compress intermediate map output (worthwhile for I/O-heavy shuffles;
    // compressing the final reduce output is rarely needed here).
    conf.setBoolean(Job.MAP_OUTPUT_COMPRESS, true);
    conf.setClass(Job.MAP_OUTPUT_COMPRESS_CODEC, GzipCodec.class, CompressionCodec.class);

    // Path of the job jar shipped to the YARN cluster.
    // BUG FIX: the original key "mapred" is not a Hadoop property and was a
    // silent no-op; the correct property is "mapreduce.job.jar".
    conf.set("mapreduce.job.jar", "D:/mapreduce.jar");

    Job job = Job.getInstance(conf);

    // Locate the job jar by class (alternative to job.setJar("pv.jar")).
    job.setJarByClass(ImportSubmitter.class);

    // Mapper emits rowkey -> Put; HFileOutputFormat2 converts these to HFiles.
    job.setMapperClass(ImportMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    // Source files are plain text.
    job.setInputFormatClass(TextInputFormat.class);

    // Input and output paths.
    FileInputFormat.setInputPaths(job, srcPaths);
    FileOutputFormat.setOutputPath(job, destPath);

    // HBase connection settings.
    Configuration hbaseConf = HBaseConfiguration.create();
    hbaseConf.set("hbase.zookeeper.quorum", "cdh-namenode:2181");

    TableName tableName = TableName.valueOf("test:t_org");
    boolean succeeded;
    // try-with-resources: Table, RegionLocator and Admin are Closeable and were
    // leaked by the original; Connection is closed last (declared first).
    try (Connection conn = ConnectionFactory.createConnection(hbaseConf);
         Table table = conn.getTable(tableName);
         RegionLocator regionLocator = conn.getRegionLocator(tableName);
         Admin admin = conn.getAdmin()) {

      // Sets the output format to HFileOutputFormat2, the reducer count to the
      // table's region count, the TotalOrderPartitioner and the output
      // key/value classes — bulk load REQUIRES reducers to produce sorted,
      // region-aligned HFiles, so the original setNumReduceTasks(0) and
      // setOutputFormatClass calls (which this overrode anyway) were removed.
      HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);

      // Run the MapReduce job and wait for completion.
      succeeded = job.waitForCompletion(true);

      // BUG FIX: only bulk-load the HFiles into HBase when the job succeeded;
      // the original loaded unconditionally, even after a failed job.
      if (succeeded) {
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(hbaseConf);
        loader.doBulkLoad(destPath, admin, table, regionLocator);
      }
    }

    // Exit with 0 on success, 1 on job failure.
    System.exit(succeeded ? 0 : 1);
  }
}
