package com.shujia.wyh.jinjie;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
import org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Mapper that turns one tab-separated telecom record into the (rowkey, KeyValue)
 * pairs consumed by HFileOutputFormat2 during bulk loading.
 *
 * Expected input columns (tab-separated):
 *   0 phoneNum, 1 wg (grid), 2 city, 3 qx (district), 4 stayTime,
 *   5 startTime, 6 endTime, 7 date
 *
 * Records with fewer than 8 columns, or whose grid column is the null
 * marker "\N", are treated as dirty data and dropped.
 */
class MyBulkLoadMapper extends Mapper<LongWritable, Text,ImmutableBytesWritable,KeyValue>{
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue>.Context context) throws IOException, InterruptedException {
        // Split the raw line into its tab-separated fields.
        String[] fields = value.toString().split("\t");

        // Only process the record when it is not dirty data.
        if (fields.length > 7 && !("\\N".equals(fields[1]))) {
            // Phone numbers repeat, so combine phone number and entry (start)
            // time to form a unique rowkey.
            String rowKey = fields[0] + "_" + fields[5];
            byte[] rowBytes = Bytes.toBytes(rowKey);
            byte[] family = Bytes.toBytes("info");
            ImmutableBytesWritable outKey = new ImmutableBytesWritable(rowBytes);

            // Remaining columns emitted as cells; startTime (index 5) is already
            // part of the rowkey, so it is not written as a separate qualifier.
            String[] qualifiers = {"wg", "city", "qx", "stayTime", "endTime", "date"};
            int[] fieldIndexes  = {1,    2,      3,    4,          6,         7};

            // Wrap each column into a KeyValue cell and emit it under the rowkey.
            // KeyValue(row, family, qualifier, value)
            for (int i = 0; i < qualifiers.length; i++) {
                KeyValue cell = new KeyValue(rowBytes, family,
                        Bytes.toBytes(qualifiers[i]),
                        Bytes.toBytes(fields[fieldIndexes[i]]));
                context.write(outKey, cell);
            }
        }
    }
}

/**
 * Driver for bulk loading the "dianxin_bulk" HBase table: runs a MapReduce job
 * that writes HFiles via {@link HFileOutputFormat2}, then hands the generated
 * files to the region servers with {@link BulkLoadHFiles}.
 */
public class HBaseBulkLoadingDemo {
    public static void main(String[] args) throws Exception {
        // The bulk-loading cluster is the same as the Hadoop cluster, so either a
        // Hadoop Configuration or an HBase configuration object works here.
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum used to locate the HBase cluster.
        conf.set("hbase.zookeeper.quorum", "master:2181,node1:2181,node2:2181");

        // Create and describe the MapReduce job.
        Job job = Job.getInstance(conf);
        job.setJobName("Hbase bulk loading table dianxin_bulk");
        job.setJarByClass(HBaseBulkLoadingDemo.class);
        job.setMapperClass(MyBulkLoadMapper.class);

        // Write HFiles directly instead of going through the HBase RPC write path.
        job.setOutputFormatClass(HFileOutputFormat2.class);

        // Map output types: rowkey and cell.
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(KeyValue.class);

        // No custom reduce logic is needed, but HFile generation requires a total
        // order across reducers and sorted KeyValues within each reducer.
        // (configureIncrementalLoad below also configures these from the table's
        // region boundaries.)
        job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
        job.setReducerClass(KeyValueSortReducer.class);

        // Input data and HFile output directory on HDFS.
        FileInputFormat.setInputPaths(job, new Path("/bigdata25/bulkload/input/dianxin_data.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/bigdata25/bulkload/out4"));

        TableName table = TableName.valueOf("dianxin_bulk");

        // FIX: the original leaked the Connection, Admin, Table and RegionLocator
        // (none were ever closed). try-with-resources guarantees cleanup; the
        // unused Admin is no longer acquired at all.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table dianxinBulk = conn.getTable(table);
             RegionLocator regionLocator = conn.getRegionLocator(table)) {

            String tableName = table.getNameAsString();

            // Configure partitioning/sorting so the generated HFiles line up with
            // the table's current region boundaries.
            HFileOutputFormat2.configureIncrementalLoad(job, dianxinBulk, regionLocator);

            // Submit the job and wait for it to finish.
            boolean success = job.waitForCompletion(true);

            if (success) {
                System.out.println("=====================HFile文件生成成功！！开始与Hbase中" + tableName + "表建立映射关系==================================");
                // Hand the generated HFiles over to the region servers
                // (BulkLoadHFiles is the replacement for the deprecated
                // LoadIncrementalHFiles API).
                BulkLoadHFiles bulkLoadHFiles = BulkLoadHFiles.create(conf);
                bulkLoadHFiles.bulkLoad(table, new Path("/bigdata25/bulkload/out4"));
            } else {
                System.out.println("=====================HFile文件生成失败！！！==================================");
            }
        }
    }
}
