package com.arch.bigdata.hbase1x.lx;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

/**
 * MapReduce driver that bulk-loads TSV data into an HBase table via HFiles.
 *
 * <p>Input line format: {@code rowkey \t family:qualifier \t value}.
 *
 * <p>Usage: {@code BulkLoadJob <inputPath> <cluster> <outputDir> <tableName> [queueName]}
 * where {@code cluster} selects the target HDFS/HBase cluster: "hadoop3",
 * "hbase_ssd", or anything else for the default hadoop2 cluster.
 *
 * <p>Created by juntaozhang on 2017/11/27.
 * Modified by evajyao on 2018-10-19: added cluster queue parameter.
 * Modified by evajyao on 2019-02-13: added timestamp-write parameter.
 * Modified by evajyao on 2019-06-26: removed timestamp-write parameter; added
 * target-cluster selection (defaults to hadoop2 unless "hadoop3" is given).
 */
public class BulkLoadJob {
    private static final Logger logger = LoggerFactory.getLogger(BulkLoadJob.class);

    /**
     * Maps one TSV line ({@code rowkey \t family:qualifier \t value}) to a
     * {@link Put} keyed by the row key, for consumption by
     * {@link HFileOutputFormat2}.
     */
    public static class BulkLoadMap extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {

            String[] fields = value.toString().split("\t");
            String hkey = fields[0];
            // Split "family:qualifier" once, with limit 2 so a qualifier that
            // itself contains ':' is preserved instead of silently truncated.
            String[] familyAndColumn = fields[1].split(":", 2);
            String family = familyAndColumn[0];
            String column = familyAndColumn[1];
            String hvalue = fields[2];
            // Timestamp-write support removed 2019-06-26; kept for reference:
            //String ts = fields[3];
            //long hts = Long.parseLong(ts);
            final byte[] rowKey = Bytes.toBytes(hkey);
            final ImmutableBytesWritable hKey = new ImmutableBytesWritable(rowKey);
            Put hPut = new Put(rowKey);
            byte[] cell = Bytes.toBytes(hvalue);
            hPut.addColumn(Bytes.toBytes(family), Bytes.toBytes(column), cell);
            //hPut.addColumn(Bytes.toBytes(family), Bytes.toBytes(column),hts, cell);
            context.write(hKey, hPut);
        }
    }

    /**
     * Configures the target cluster, runs the HFile-generating job, then bulk
     * loads the HFiles into the target table.
     *
     * @param args [0] input path, [1] cluster selector ("hadoop3" | "hbase_ssd" |
     *             other = hadoop2), [2] output directory (relative to the chosen
     *             cluster root), [3] table name, [4] optional YARN queue name
     * @throws Exception on job setup or HDFS failures
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 4) {
            logger.error("Usage: BulkLoadJob <inputPath> <hadoop3|hbase_ssd|other> <outputDir> <tableName> [queueName]");
            System.exit(2);
        }
        Configuration conf = HBaseConfiguration.create();
        String inputPath = args[0];
        String outputPathPrefix;
        // Constant-first equals avoids any NPE on the selector argument.
        if ("hadoop3".equals(args[1])) {
            // hadoop3 HBase configuration
            conf.set("dfs.nameservices", "hacluster,hacluster3");
            conf.set("dfs.ha.namenodes.hacluster3", "n1,n2");
            conf.set("dfs.namenode.rpc-address.hacluster3.n1", "1.hadoop3.com:8020");
            conf.set("dfs.namenode.rpc-address.hacluster3.n2", "2.hadoop3.com:8020");
            conf.set("dfs.namenode.http-address.hacluster3.n1", "1.hadoop3.com:8090");
            conf.set("dfs.namenode.http-address.hacluster3.n2", "2.hadoop3.com:8090");
            conf.set("dfs.client.failover.proxy.provider.hacluster3", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
            conf.set("hbase.zookeeper.quorum","1.hadoop3.com:2181,2.hadoop3.com:2181,3.hadoop3.com:2181");
            outputPathPrefix = "hdfs://hacluster3/";
        } else if ("hbase_ssd".equals(args[1])) {
            // hbase_ssd HBase configuration
            conf.set("dfs.nameservices", "hacluster,hacluster-hbase");
            conf.set("dfs.ha.namenodes.hacluster-hbase", "n1,n2");
            conf.set("dfs.namenode.rpc-address.hacluster-hbase.n1", "10.11.16.227:8020");
            conf.set("dfs.namenode.rpc-address.hacluster-hbase.n2", "10.11.16.228:8020");
            conf.set("dfs.namenode.http-address.hacluster-hbase.n1", "10.11.16.227:8090");
            conf.set("dfs.namenode.http-address.hacluster-hbase.n2", "10.11.16.228:8090");
            conf.set("dfs.client.failover.proxy.provider.hacluster-hbase", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
            conf.set("hbase.zookeeper.quorum","10.11.16.227:2181,10.11.16.228:2181,10.10.29.42:2181");
            outputPathPrefix = "hdfs://hacluster-hbase/";
        } else {
            // hadoop2 HBase configuration (default)
            conf.set("dfs.nameservices", "hacluster,hacluster2");
            conf.set("dfs.ha.namenodes.hacluster2", "n1,n2");
            conf.set("dfs.namenode.rpc-address.hacluster2.n1", "1.hadoop2.com:8020");
            conf.set("dfs.namenode.rpc-address.hacluster2.n2", "2.hadoop2.com:8020");
            conf.set("dfs.namenode.http-address.hacluster2.n1", "1.hadoop2.com:8090");
            conf.set("dfs.namenode.http-address.hacluster2.n2", "2.hadoop2.com:8090");
            conf.set("dfs.client.failover.proxy.provider.hacluster2", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
            outputPathPrefix = "hdfs://hacluster2/";
        }
        String outputPath = outputPathPrefix + args[2];
        String tableName = args[3];

        if (args.length > 4) {
            conf.set("mapred.job.queue.name", args[4]);
        }

        logger.info(conf.get("dfs.nameservices"));
        logger.info(conf.toString());

        // Track success and exit AFTER the finally block, so the HTable is
        // always closed (System.exit inside the try would skip finally).
        boolean success = false;
        HTable hTable = null;
        try {
            Job job = Job.getInstance(conf, "hbase_bulk_load");
            job.setJarByClass(BulkLoadJob.class);

            job.setMapperClass(BulkLoadJob.BulkLoadMap.class);
            job.setMapOutputKeyClass(ImmutableBytesWritable.class);
            job.setMapOutputValueClass(Put.class);

            // Speculative execution would produce duplicate HFiles.
            job.setSpeculativeExecution(false);
            job.setReduceSpeculativeExecution(false);

            // in/out format
            job.setInputFormatClass(TextInputFormat.class);
            job.setOutputFormatClass(HFileOutputFormat2.class);

            // Remove any stale output from a previous run.
            Path path = new Path(outputPath);
            FileSystem fileSystem = path.getFileSystem(conf);
            if (fileSystem.exists(path)) {
                fileSystem.delete(path, true);
            }
            FileInputFormat.setInputPaths(job, inputPath);
            FileOutputFormat.setOutputPath(job, path);

            hTable = new HTable(conf, tableName);
            // Derives partitioning/compression for the HFiles from the live table.
            HFileOutputFormat2.configureIncrementalLoad(job, hTable);

            if (job.waitForCompletion(true)) {
                // Open up permissions so the HBase service user can move the HFiles.
                FsShell shell = new FsShell(conf);
                try {
                    shell.run(new String[]{"-chmod", "-R", "777", outputPath});
                } catch (Exception e) {
                    logger.error("Couldnt change the file permissions ", e);
                    throw new IOException(e);
                }

                // Load the generated HFiles into the HBase table.
                LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
                loader.doBulkLoad(path, hTable);

                // Clean up the staging directory after a successful load.
                if (fileSystem.exists(path)) {
                    fileSystem.delete(path, true);
                }
                success = true;
            } else {
                logger.error("loading failed.");
            }
        } catch (IllegalArgumentException e) {
            // Log through SLF4J (with stack trace) rather than printStackTrace,
            // and fall through so the process exits non-zero below.
            logger.error("bulk load job failed", e);
        } finally {
            if (hTable != null) {
                hTable.close();
            }
        }
        if (!success) {
            System.exit(1);
        }
    }
}
