package com.shujia.jinjie;


import com.shujia.util.HBaseTool;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.CellSortReducer;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;

import org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;


/**
 * Mapper that turns one CSV-ish line of telecom data into HBase {@link KeyValue}s
 * for bulk loading.
 *
 * Input line layout (tab-separated, 8 fields):
 *   phone, gridId(wg), cityId, districtId(qx), stayTime, enterTime, leaveTime, dayPartition
 * Example:
 *   D55433A437AEC8D8D3DB2BCA56E9E64392A9C,117210031795040,83401,8340104,301,20180503190539,20180503233517,20180503
 *
 * Row key is {@code phone + "-" + enterTime}; each remaining field becomes one
 * column in family {@code info}. Field 5 (enterTime) is part of the row key and
 * is therefore not emitted as a column.
 */
class MyBulkLoadMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {

    /** Column qualifiers by field index; "" entries are never emitted (0 = rowkey part, 5 = enterTime). */
    private static final String[] COL_NAMES = {"", "wg", "city", "qx", "stayTime", "", "leaveTime", "day"};

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue>.Context context) throws IOException, InterruptedException {
        String line = value.toString();
        // Skip the `head`-style file banner ("==> tl_hefei_shushan_503.txt <==")
        // and lines carrying Hive NULL markers (literal "\N").
        if (line.contains("shushan") || line.contains("\\N")) {
            return;
        }

        String[] infos = line.split("\t");
        // Guard against malformed/short lines: the original code threw
        // ArrayIndexOutOfBoundsException here, failing the whole task.
        if (infos.length < 6) {
            return;
        }

        // rowkey = phone + "-" + enterTime
        String rowkey = infos[0] + "-" + infos[5];
        byte[] rowkeyBytes = Bytes.toBytes(rowkey); // hoisted: reused for every column
        ImmutableBytesWritable outKey = new ImmutableBytesWritable(rowkeyBytes);

        // Bound by both arrays so extra trailing fields cannot overrun COL_NAMES.
        int limit = Math.min(infos.length, COL_NAMES.length);
        for (int i = 1; i < limit; i++) {
            if (i == 5) {
                continue; // enterTime already encoded in the rowkey
            }
            // KeyValue(row, family, qualifier, value)
            KeyValue keyValue = new KeyValue(rowkeyBytes,
                    Bytes.toBytes("info"),
                    Bytes.toBytes(COL_NAMES[i]),
                    Bytes.toBytes(infos[i]));
            context.write(outKey, keyValue);
        }
    }
}

/**
 * Driver for an HBase bulk load of telecom data.
 *
 * Pipeline: read tab-separated text from {@code args[0]}, write sorted HFiles to
 * {@code args[1]} via {@link HFileOutputFormat2}, then load the HFiles into the
 * {@code dian_xin_bulk} table with {@link BulkLoadHFiles}.
 *
 * Usage:
 *   hdfs dfs -mkdir -p /test/bulk/input
 *   hdfs dfs -put DIANXIN.csv /test/bulk/input
 *   hadoop jar hbase-1.0-jar-with-dependencies.jar com.shujia.jinjie.BulkLoadingDianXin \
 *       /test/bulk/input /test/bulk/output
 */
public class BulkLoadingDianXin {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // cluster configuration
        conf.set("hbase.zookeeper.quorum", "master,node1,node2"); // ZooKeeper ensemble
        conf.set("fs.defaultFS", "hdfs://master:9000"); // HDFS entry point

        Job job = Job.getInstance(conf);
        job.setJobName("BulkLoadingDianXin");
        job.setJarByClass(BulkLoadingDianXin.class);

        // Partitioner: keeps output ordered ACROSS reducers (total order),
        // which HFile loading requires.
        job.setPartitionerClass(SimpleTotalOrderPartitioner.class);

        // Reducer: sorts cells WITHIN each reducer before HFile writing.
        job.setReducerClass(CellSortReducer.class);

        // Map side.
        job.setMapperClass(MyBulkLoadMapper.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(KeyValue.class);

        // NOTE: an explicit setNumReduceTasks is ignored here — the reduce count
        // is driven by the table's region count (1 for a non-pre-split table).

        // Input path.
        FileInputFormat.addInputPath(job, new Path(args[0]));

        // Output path: remove a stale output directory so the job can run.
        Path path = new Path(args[1]);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
        FileOutputFormat.setOutputPath(job, path);

        TableName dianXinName = TableName.valueOf("dian_xin_bulk");

        // FIX: Connection/Admin/Table/RegionLocator were never closed before;
        // try-with-resources guarantees release even if configuration throws.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {

            // Create the target table (family "info") if it does not exist yet.
            if (!admin.tableExists(dianXinName)) {
                HBaseTool.createOneTable("dian_xin_bulk", "info");
            }

            try (Table dianXin = conn.getTable(dianXinName);
                 RegionLocator regionLocator = conn.getRegionLocator(dianXinName)) {
                // Configure HFile output: reads region boundaries now, so the
                // resources may be closed before the job actually runs.
                HFileOutputFormat2.configureIncrementalLoad(job, dianXin, regionLocator);
            }
        }

        boolean flag = job.waitForCompletion(true);

        if (flag) {
            System.out.println("-----------------------------hfile文件生成成功！-----------------------------------------");
            // Load the generated HFiles into the table's regions.
            System.out.println("正在加载数据");
            BulkLoadHFiles bulkLoadHFiles = BulkLoadHFiles.create(conf);
            bulkLoadHFiles.bulkLoad(dianXinName, path);
        } else {
            System.out.println("-----------------------------hfile文件生成失败！-----------------------------------------");
            // FIX: previously the process exited 0 even on failure, which hides
            // errors from calling scripts/schedulers.
            System.exit(1);
        }
    }
}
