package com.sunzm.hbase;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.PutCombiner;
import org.apache.hadoop.hbase.mapreduce.PutSortReducer;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/**
 * 通过BulkLoad导入HBase数据
 *
 * hadoop jar XXX.jar com.sunzm.hbase.GenerateHFileAndBulkLoad 192.168.1.158:2181 /tmp/sunzm/hbase/mytest/call-tmp.dat /tmp/sunzm/hbase/mytest/hfile/ hdfs://192.168.1.158:8020 callRecordId
 *
 * 注意客户端的版本不能高于服务端版本
 * @author Administrator
 * @version 1.0
 * @date 2021-08-11 16:16
 */
public class GenerateHFileAndBulkLoad extends Configured implements Tool {

    /** Target HBase table name. */
    private static final String TABLE_NAME = "mytest";
    /** Job-configuration key naming the JSON field whose value becomes the row key. */
    private static final String ROWKEY_NAME = "rowkey.name";

    public static void main(String[] args) throws Exception {
        // Run as the "hdfs" user so the generated HFiles are readable by HBase.
        System.setProperty("HADOOP_USER_NAME", "hdfs");

        int exitCode = ToolRunner.run(HBaseConfiguration.create(),
                new GenerateHFileAndBulkLoad(), args);

        System.exit(exitCode);
    }

    /**
     * Maps one JSON text line to a {@link Put} keyed by the configured row-key field.
     * All JSON fields are written as columns under the "info" family.
     */
    private static class HFileMapper extends
            Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
        private static final byte[] COLUMN_FAMILY_CURRENT = Bytes.toBytes("info");
        private static final byte[] COLUMN_FAMILY_HISTORY = Bytes.toBytes("h");
        // Name of the JSON field used as the HBase row key; resolved from job
        // configuration in setup(). Instance field (not static) so concurrent
        // mapper instances with different configs cannot interfere.
        private String rowKeyFieldName = "rowKey";

        /**
         * BUG FIX: the original overrode {@code run(Context)} without calling
         * {@code setup()}, the record loop, or {@code cleanup()}, so the mapper
         * never processed a single record. Overriding {@code setup()} keeps the
         * framework's run loop intact and still reads the config once per task.
         */
        @Override
        protected void setup(Context context) {
            rowKeyFieldName = context.getConfiguration().get(ROWKEY_NAME, "rowKey");
        }

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            try {
                // Each input line is expected to be a single JSON object.
                String line = value.toString();
                JSONObject jsonObject = JSON.parseObject(line);

                // BUG FIX: the original looked up the literal configuration key
                // ("rowkey.name") in the JSON instead of the configured field
                // name (e.g. "callRecordId"), yielding a null row key.
                byte[] rowKey = Bytes.toBytes(jsonObject.getString(rowKeyFieldName));

                Put p = new Put(rowKey);
                for (String k : jsonObject.keySet()) {
                    String v = jsonObject.getString(k);
                    if (v == null) {
                        // Skip JSON nulls instead of letting an NPE discard the
                        // entire record via the catch-all below.
                        continue;
                    }
                    // Bytes.toBytes encodes as UTF-8; String.getBytes() would
                    // depend on the platform default charset.
                    p.addColumn(COLUMN_FAMILY_CURRENT, Bytes.toBytes(k), Bytes.toBytes(v));
                }

                context.write(new ImmutableBytesWritable(rowKey), p);
            } catch (Exception e) {
                // Best-effort import: report and skip malformed records rather
                // than failing the whole job.
                e.printStackTrace();
                System.out.println("skipping record " + value.toString());
            }
        }
    }

    /**
     * Builds and runs the HFile-generating MR job for {@link #TABLE_NAME}.
     *
     * <p>Connection details and paths are currently hard-coded for testing;
     * the commented-out sections show the intended CLI-argument form
     * (see the class javadoc for the expected argument order).
     *
     * @param args tool arguments (currently unused — see above)
     * @return 0 on success, 1 if the MR job fails
     */
    @Override
    public int run(String[] args) throws Exception {
        /*if (args.length < 4) {
            System.err.println("Usage: BulkImporter <hbase.zookeeper.quorum> <input> <output> <fs.defaultFS> [rowkey.name]");
            return -1;
        }*/

        //HBase config
        Configuration conf = HBaseConfiguration.create();
        /*conf.set("hbase.zookeeper.quorum", args[0]);
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("fs.defaultFS", args[3]);
        conf.set(ROWKEY_NAME, args[4]);*/

        conf.set("hbase.zookeeper.quorum", "192.168.1.158:2181");
        conf.set(ROWKEY_NAME, "callRecordId");

        Job job = Job.getInstance(conf, getClass().getSimpleName());
        job.setJarByClass(getClass());

        //input & output paths
        /*Path input = new Path(args[1]);
        Path tmpPath = new Path(args[2]);*/
        Path input = new Path("hdfs://192.168.1.158:8020/tmp/sunzm/hbase/mytest/call-tmp.dat");
        Path tmpPath = new Path("hdfs://192.168.1.158:8020/tmp/sunzm/hbase/mytest/hfile/");

        FileInputFormat.addInputPath(job, input);
        FileOutputFormat.setOutputPath(job, tmpPath);

        //input format settings
        job.setInputFormatClass(TextInputFormat.class);

        //Map settings
        job.setMapperClass(HFileMapper.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);

        job.setCombinerClass(PutCombiner.class);

        //Reduce settings: PutSortReducer turns Puts into sorted KeyValues for HFiles
        job.setReducerClass(PutSortReducer.class);
        job.setOutputKeyClass(ImmutableBytesWritable.class);
        job.setOutputValueClass(KeyValue.class);

        //HFile settings
        TableName tableName = TableName.valueOf(TABLE_NAME);
        // RESOURCE FIX: the original never closed the Connection or the
        // RegionLocator; try-with-resources closes all four in reverse order.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tableName);
             Admin admin = connection.getAdmin();
             RegionLocator regionLocator = connection.getRegionLocator(tableName)) {

            // Configures total-order partitioning against the table's current
            // region boundaries so each reducer writes one region's HFile.
            HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
            HFileOutputFormat2.setOutputPath(job, tmpPath);
            HFileOutputFormat2.setCompressOutput(job, true);
            HFileOutputFormat2.setOutputCompressorClass(job, SnappyCodec.class);

            //kick off MR job
            if (!job.waitForCompletion(true)) {
                return 1;
            }

            //change permissions so that HBase user can read it
             /*FileSystem fs = FileSystem.get(conf);
            FsPermission changedPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
            fs.setPermission(tmpPath, changedPermission);
           List<String> files = getAllFilePath(tmpPath, fs);
            for (String file : files) {
                fs.setPermission(new Path(file), changedPermission);
                System.out.println("Changing permission for file " + file);
            }*/

            //bulk load hbase files
            //BulkLoadHFiles bulkLoadHFiles = BulkLoadHFiles.create(conf);

            //bulkLoadHFiles.bulkLoad(tableName, tmpPath);

            //delete the hfiles
            //FileSystem.get(conf).delete(tmpPath, true);
            return 0;
        }
    }

    /***
     * Recursively lists every directory and file path under {@code filePath}.
     * Directories themselves are included in the result, followed by their contents.
     *
     * @param filePath root path to walk
     * @param fs       filesystem to query
     * @return full string paths of all directories and files under the root
     * @throws FileNotFoundException if the root path does not exist
     * @throws IOException on filesystem access errors
     */
    public static List<String> getAllFilePath(Path filePath, FileSystem fs) throws FileNotFoundException, IOException {
        List<String> fileList = new ArrayList<String>();
        FileStatus[] fileStatus = fs.listStatus(filePath);
        for (FileStatus fileStat : fileStatus) {
            // Add the entry itself; recurse into directories for their children.
            fileList.add(fileStat.getPath().toString());
            if (fileStat.isDirectory()) {
                fileList.addAll(getAllFilePath(fileStat.getPath(), fs));
            }
        }
        return fileList;
    }
}
