package com.shujia.jinjie;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
import org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 *  使用bulkloading批量想hbase表中导入数据
 */

/**
 * Mapper that turns raw tab-separated telecom records into HBase {@link KeyValue}
 * cells keyed by an {@link ImmutableBytesWritable} row key, for HFile bulk loading.
 *
 * <p>Row key = phoneNum + "-" + startTime: a phone number alone is not unique
 * because the same person appears in multiple grids/time windows, so the grid
 * entry time is appended to keep every record.
 */
class MyBulkLoadMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {
    /*
    Sample input (tab-separated), e.g. tl_hefei_shushan_503.txt:
    D55433A437AEC8D8D3DB2BCA56E9E64392A9D93C	117210031795040	83401	8340104	301	20180503190539	20180503233517	20180503
    8827F3196977C6F752680505FEC0C7D3A18D4DFC	\N	\N	\N	\N	\N	\N	\N
     */

    /** All cells go into the single column family "info". */
    private static final byte[] FAMILY = Bytes.toBytes("info");

    /**
     * Emits one (rowKey, KeyValue) pair per data column of a clean input line.
     *
     * @param key     byte offset of the line in the input split (unused)
     * @param value   one raw input line
     * @param context MapReduce context receiving (row key, cell) pairs
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue>.Context context) throws IOException, InterruptedException {
        // Convert the Hadoop Text into a Java String.
        String line = value.toString();

        // Filter dirty data: header-like lines containing "shushan" and
        // records with null markers (\N).
        if (line.contains("shushan") || line.contains("\\N")) {
            return;
        }

        String[] fields = line.split("\t");
        // Guard against malformed/truncated lines — indexing fields[7] on a
        // short line would otherwise throw ArrayIndexOutOfBoundsException
        // and fail the whole task.
        if (fields.length < 8) {
            return;
        }

        String phoneNum = fields[0];
        String startTime = fields[5];

        // Row key: phone number + grid entry time (see class comment for why).
        byte[] rk = Bytes.toBytes(phoneNum + "-" + startTime);
        ImmutableBytesWritable rowKey = new ImmutableBytesWritable(rk);

        // Remaining 6 columns, as (qualifier, value) pairs. startTime is not
        // written as a column because it is already part of the row key.
        String[][] cells = {
                {"wg",       fields[1]},
                {"city",     fields[2]},
                {"qx",       fields[3]},
                {"stayTime", fields[4]},
                {"endTime",  fields[6]},
                {"date",     fields[7]},
        };
        for (String[] cell : cells) {
            // KeyValue(row, family, qualifier, value)
            context.write(rowKey, new KeyValue(rk, FAMILY, Bytes.toBytes(cell[0]), Bytes.toBytes(cell[1])));
        }
    }
}


/**
 * Bulk-loads data into an HBase table: a MapReduce job writes sorted HFiles
 * via {@link HFileOutputFormat2}, then {@link LoadIncrementalHFiles} moves
 * them directly into the table's regions.
 */
public class BulkLoadingDemo {
    public static void main(String[] args) throws Exception {
        // Cluster configuration. Bulk load implies the HBase cluster and the
        // HDFS cluster are the same deployment, so one Configuration serves both.
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum used to locate the HBase cluster.
        conf.set("hbase.zookeeper.quorum","master:2181,node1:2181,node2:2181");

        // Create the MapReduce job instance.
        Job job = Job.getInstance(conf);
        job.setJobName("hbase使用bulkloading方式批量加载数据作业");
        job.setJarByClass(BulkLoadingDemo.class);
        job.setMapperClass(MyBulkLoadMapper.class);

        // No custom reducer: HBase supplies the sorting reducer.
        // NOTE: configureIncrementalLoad() below also sets the output format,
        // partitioner and reducer; these explicit calls are kept for clarity
        // but are superseded by it.
        job.setOutputFormatClass(HFileOutputFormat2.class);
        // Map output types: key = row key, value = one cell (column).
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(KeyValue.class);

        // HFiles must be written in the same lexicographic row-key order that
        // regions use, so partitioning + sorting happen while generating them.
        job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
        job.setReducerClass(KeyValueSortReducer.class); // CellSortReducer in newer HBase

        // Input data and HFile output directory.
        FileInputFormat.setInputPaths(job,new Path("/bigdata29/data/dianxin_data"));
        FileOutputFormat.setOutputPath(job,new Path("/bigdata29/hbase/out4"));

        TableName dianxinDataBulk = TableName.valueOf("dianxin_data_bulk");

        // try-with-resources so the connection, admin handle, table and region
        // locator are always closed (the original leaked them — each open
        // Connection holds a ZooKeeper session).
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table dianxinBulkTable = conn.getTable(dianxinDataBulk);
             RegionLocator regionLocator = conn.getRegionLocator(dianxinDataBulk)) {

            // Configure the job to produce HFiles matching the table's
            // region boundaries; must run before the job is submitted.
            HFileOutputFormat2.configureIncrementalLoad(job,dianxinBulkTable,regionLocator);

            // Submit the job and wait for completion.
            boolean b = job.waitForCompletion(true);

            if(b){
                System.out.println("====================== Hfile文件生成成功!! 在/bigdata29/hbase/out4目录下 ================================");
                // Move the generated HFiles into the table's regions.
                LoadIncrementalHFiles loadIncrementalHFiles = new LoadIncrementalHFiles(conf);
                loadIncrementalHFiles.doBulkLoad(new Path("/bigdata29/hbase/out4"),admin,dianxinBulkTable,regionLocator);
            }else {
                System.out.println("============= Hfile文件生成失败!! ==================");
            }
        }
    }
}
