package com.shujia.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.*;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Bulk-loads CSV phone-location data into the HBase table "dianxin".
 *
 * Phase 1: a MapReduce job reads CSV lines from /data/dianxin and writes
 * sorted HFiles to /data/hfile via HFileOutputFormat's incremental-load setup.
 * Phase 2: LoadIncrementalHFiles moves the generated HFiles directly into the
 * table's region directories (no write path through the RegionServers).
 */
public class Demo10BulkLoading {

    /**
     * Maps one CSV input line to two HFile {@link KeyValue} cells.
     *
     * Expected line layout: {@code mdn,start_date,...,x,y} — only fields
     * 0, 1, 4 and 5 are read. The rowkey is {@code mdn + "_" + start_date},
     * and each line produces columns {@code info:x} and {@code info:y}.
     * Malformed lines (fewer than 6 fields) are counted and skipped instead
     * of crashing the task with ArrayIndexOutOfBoundsException.
     */
    public static class BuikLoadIngMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            String[] fields = value.toString().split(",");

            // Guard against short/corrupt lines — the original indexed
            // fields[4]/fields[5] unconditionally and would kill the task.
            if (fields.length < 6) {
                context.getCounter("BulkLoading", "malformedLines").increment(1L);
                return;
            }

            // Phone number
            String mdn = fields[0];

            // Timestamp
            String startDate = fields[1];

            // Rowkey = phone number + time, so rows sort by phone then time.
            String rowkey = mdn + "_" + startDate;

            // Coordinates (presumably longitude/latitude — matches columns x/y;
            // TODO confirm field semantics against the source data schema).
            String x = fields[4];
            String y = fields[5];

            // Use an explicit charset: String.getBytes() with no argument uses
            // the platform default, which can differ between cluster nodes and
            // silently produce inconsistent rowkeys.
            byte[] rowkeyBytes = rowkey.getBytes(StandardCharsets.UTF_8);
            byte[] family = "info".getBytes(StandardCharsets.UTF_8);

            // Build the HFile cell format (rowkey, family, qualifier, value).
            KeyValue xKeyValue = new KeyValue(rowkeyBytes, family,
                    "x".getBytes(StandardCharsets.UTF_8), x.getBytes(StandardCharsets.UTF_8));
            KeyValue yKeyValue = new KeyValue(rowkeyBytes, family,
                    "y".getBytes(StandardCharsets.UTF_8), y.getBytes(StandardCharsets.UTF_8));

            // Emit to the sorting reducer configured by configureIncrementalLoad.
            context.write(new ImmutableBytesWritable(rowkeyBytes), xKeyValue);
            context.write(new ImmutableBytesWritable(rowkeyBytes), yKeyValue);
        }
    }

    public static void main(String[] args) throws Exception {

        Configuration conf = HBaseConfiguration.create();

        Job job = Job.getInstance(conf);
        job.setJarByClass(Demo10BulkLoading.class);
        job.setJobName("Demo10BulkLoading");

        // Job output types expected by HFileOutputFormat.
        job.setOutputKeyClass(ImmutableBytesWritable.class);
        job.setOutputValueClass(KeyValue.class);

        job.setMapperClass(BuikLoadIngMapper.class);

        // NOTE: configureIncrementalLoad() below installs the sorting reducer
        // (KeyValueSortReducer), a TotalOrderPartitioner aligned to the table's
        // region boundaries, and one reduce task per region — so manually
        // setting the reducer/partitioner/reduce count here (as the original
        // did) is redundant and gets overwritten.

        // HTable holds a connection and is Closeable — release it even if the
        // job or the bulk load throws.
        HTable table = new HTable(conf, TableName.valueOf("dianxin"));
        try {
            // Configure total-order HFile output against table "dianxin" so the
            // generated files line up with its regions.
            HFileOutputFormat.configureIncrementalLoad(job, table);

            // CSV input path.
            FileInputFormat.addInputPath(job, new Path("/data/dianxin"));

            // Output path for the generated HFiles.
            FileOutputFormat.setOutputPath(job, new Path("/data/hfile"));

            // Bug fix: the original ignored the job result and bulk-loaded
            // whatever partial output existed even when the job failed.
            if (!job.waitForCompletion(true)) {
                System.err.println("HFile-generation job failed; aborting bulk load");
                System.exit(1);
            }

            // Move the HFiles into the table's region directories.
            System.out.println("Loading HFiles into HBase table dianxin");
            LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
            loader.doBulkLoad(new Path("/data/hfile"), table);
        } finally {
            table.close();
        }
    }
}
