package com.shujia.jinjie;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;

import org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;


/**
 * Input record layout (tab-separated):
 * phone, grid id, city id, district id, stay time, enter time, leave time, day partition
 * D55433A437AEC8D8D3DB2BCA56E9E64392A9D93C,117210031795040,83401,8340104,301,20180503190539,20180503233517,20180503
 *
 * Emits one KeyValue per non-rowkey field, keyed by "phone-enterTime",
 * ready for HFileOutputFormat2 bulk loading into column family "info".
 */
class MyBulkLoadMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {

    // Column qualifier for each field index. Index 0 (phone) and index 5
    // (enter time) form the rowkey, so their slots are blank and skipped below.
    private static final String[] COL_NAMES =
            {"", "wg", "city", "qx", "stayTime", "", "leaveTime", "day"};

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue>.Context context) throws IOException, InterruptedException {
        // Convert the Hadoop Text to a Java String.
        String line = value.toString();
        // Skip concatenation headers like "==> tl_hefei_shushan_503.txt <=="
        // and records containing the Hive null marker "\N".
        if (line.contains("shushan") || line.contains("\\N")) {
            return;
        }
        String[] infos = line.split("\t");
        // Guard against malformed records: the original code threw
        // ArrayIndexOutOfBoundsException on short lines (infos[5] below) and on
        // long lines (COL_NAMES[i] in the loop), failing the whole task.
        if (infos.length != COL_NAMES.length) {
            return;
        }
        // Rowkey = phone + "-" + enter time (fields 0 and 5).
        String rowkey = infos[0] + "-" + infos[5];
        // The output key is identical for every cell of this record; the
        // framework serializes on write, so reusing one instance is safe.
        ImmutableBytesWritable outKey = new ImmutableBytesWritable(Bytes.toBytes(rowkey));

        for (int i = 1; i < infos.length; i++) {
            if (i == 5) {
                // Enter time is already encoded in the rowkey.
                continue;
            }
            // Wrap the field in a KeyValue (row, family, qualifier, value).
            KeyValue keyValue = new KeyValue(Bytes.toBytes(rowkey),
                    Bytes.toBytes("info"),
                    Bytes.toBytes(COL_NAMES[i]),
                    Bytes.toBytes(infos[i]));

            context.write(outKey, keyValue);
        }
    }
}

/**
 * Driver for bulk loading telecom location data into the HBase table
 * "dianxin_data_bulk" by generating HFiles with HFileOutputFormat2.
 *
 * Usage: BulkLoadingDianXin &lt;input path&gt; &lt;hfile output path&gt;
 */
public class BulkLoadingDianXin {
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an
        // ArrayIndexOutOfBoundsException when paths are missing.
        if (args.length < 2) {
            System.err.println("Usage: BulkLoadingDianXin <input path> <hfile output path>");
            System.exit(2);
        }

        // Bulk loading implies HBase and HDFS share the same cluster, so a
        // single configuration object serves both.
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum used to locate the HBase cluster.
        conf.set("hbase.zookeeper.quorum", "master:2181,node1:2181,node2:2181");

        // Create and describe the MapReduce job.
        Job job = Job.getInstance(conf);
        job.setJobName("hbase使用bulkloading方式批量加载数据作业");
        job.setJarByClass(BulkLoadingDianXin.class);
        job.setMapperClass(MyBulkLoadMapper.class);

        // No custom reducer is needed; output goes straight to HFiles.
        job.setOutputFormatClass(HFileOutputFormat2.class);
        // Map output: rowkey -> cell.
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(KeyValue.class);

        // HFiles must be written in rowkey order within each region; the
        // total-order partitioner plus the sort reducer produce the same
        // lexicographic ordering HBase regions expect.
        job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
        job.setReducerClass(KeyValueSortReducer.class); // CellSortReducer in newer APIs

        // Input text files and HFile output directory.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        TableName dianxinDataBulk = TableName.valueOf("dianxin_data_bulk");

        // try-with-resources: the original leaked the Connection, Admin,
        // Table and RegionLocator (all AutoCloseable).
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table dianxinBulkTable = conn.getTable(dianxinDataBulk);
             RegionLocator regionLocator = conn.getRegionLocator(dianxinDataBulk)) {

            // Configure the job to produce HFiles partitioned to match the
            // table's current region boundaries.
            HFileOutputFormat2.configureIncrementalLoad(job, dianxinBulkTable, regionLocator);

            // Submit and wait; exit non-zero on failure so schedulers/scripts
            // can detect it (the original always exited 0).
            boolean b = job.waitForCompletion(true);
            if (b) {
                System.out.println("====================== Hfile文件生成成功!! 在/bigdata30/hbase/out4目录下 ================================");
//            LoadIncrementalHFiles loadIncrementalHFiles = new LoadIncrementalHFiles(conf);
//            loadIncrementalHFiles.doBulkLoad(new Path("/bigdata30/hbase/out1"), admin, dianxinBulkTable, regionLocator);
            } else {
                System.out.println("============= Hfile文件生成失败!! ==================");
                System.exit(1);
            }
        }
    }
}
