package com.shujia.jinjie;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * 使用整合MapReduce的方式创建hbase索引。主要的流程如下：
 * 1.1扫描输入表，使用hbase继承类TableMapper
 * 1.2获取rowkey和指定字段名称和字段值
 * 1.3创建Put实例， value=” “, rowkey=姓名，column=学号
 * 1.4使用IdentityTableReducer将数据写入索引表
 */
//因为我们现在要读取的数据来自于hbase中的hfile文件，与hdfs上普通的block块文件有所区别，不能直接继承Mapper类
//要继承hbase读取数据专属的Mapper类     TableMapper
//public abstract class TableMapper<KEYOUT, VALUEOUT> extends Mapper<ImmutableBytesWritable, Result, KEYOUT, VALUEOUT>
class MyIndexMapper extends TableMapper<Text, NullWritable> {
    /**
     * Emits one record per scanned row of the source table, in the form
     * {@code "<rowkey>-<info:name>"} with a {@link NullWritable} value.
     *
     * @param key     the row key of the current row (the student id)
     * @param value   all cells of the current row returned by the scan
     * @param context MapReduce context used to emit the combined key
     * @throws IOException          on HBase/HDFS read or write failure
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Mapper<ImmutableBytesWritable, Result, Text, NullWritable>.Context context) throws IOException, InterruptedException {
        // Row key of this row.
        String id = Bytes.toString(key.get());
        // Raw bytes of the "info:name" cell; may be null if the cell is absent.
        byte[] nameBytes = value.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"));
        // Skip rows with no name cell: Bytes.toString(null) returns null, which
        // would otherwise emit "id-null" and index the literal string "null".
        if (nameBytes == null) {
            return;
        }
        String name = Bytes.toString(nameBytes);
        // Join id and name so the reducer can build the index entry.
        context.write(new Text(id + "-" + name), NullWritable.get());
    }
}

//public abstract class TableReducer<KEYIN, VALUEIN, KEYOUT> extends Reducer<KEYIN, VALUEIN, KEYOUT, Mutation>
class MyIndexReducer extends TableReducer<Text, NullWritable, NullWritable> {
    /**
     * Turns each {@code "<id>-<name>"} key produced by the mapper into a
     * {@link Put} on the index table: rowkey = name, column = info:&lt;id&gt;,
     * value = empty string.
     *
     * @param value   combined "<id>-<name>" key from the mapper
     * @param values  ignored placeholder values
     * @param context MapReduce context used to write the Put mutation
     * @throws IOException          on HBase write failure
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(Text value, Iterable<NullWritable> values, Reducer<Text, NullWritable, NullWritable, Mutation>.Context context) throws IOException, InterruptedException {
        String string = value.toString();
        // Split at the FIRST '-' only (limit = 2) so names that themselves
        // contain '-' stay intact; the previous split("-")[1] truncated them
        // and would throw on a key with no '-' at all.
        String[] parts = string.split("-", 2);
        if (parts.length < 2) {
            return; // malformed key; nothing to index
        }
        String id = parts[0];
        String name = parts[1];

        // Index entry: rowkey = name, qualifier = id, empty value.
        Put put = new Put(Bytes.toBytes(name));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(id), Bytes.toBytes(""));

        context.write(NullWritable.get(), put);
    }
}

public class HBaseIndexDemo1 {
    /**
     * Driver: scans the "students2" table and builds a secondary-index table
     * "students2_index" mapping info:name values back to student ids.
     * Creates the index table first (if absent), then runs the MR job.
     *
     * @param args unused command-line arguments
     * @throws Exception on connection, table-creation, or job failure
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // ZooKeeper quorum used by the HBase client.
        conf.set("hbase.zookeeper.quorum", "master:2181,node1:2181,node2:2181");

        Job job = Job.getInstance(conf);
        job.setJobName("给学生表创建二级索引表");
        job.setJarByClass(HBaseIndexDemo1.class);

        // The index maps column values to row keys, so scan the whole table,
        // restricted to the "info" family the mapper reads from.
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("info"));

        // Create the index table up front if it does not exist yet.
        // try-with-resources closes Admin and Connection in all cases
        // (the original leaked both).
        TableName tn = TableName.valueOf("students2_index");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (!admin.tableExists(tn)) {
                // Build the "info" family with a row-level Bloom filter.
                ColumnFamilyDescriptor columnFamilyDescriptor =
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                                .setBloomFilterType(BloomType.ROW)
                                .build();
                TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tn)
                        .setColumnFamily(columnFamilyDescriptor)
                        .build();
                admin.createTable(tableDescriptor);
                System.out.println(tn + "表创建成功！！！");
            } else {
                System.out.println(tn + "表已经存在！");
            }
        }

        // Wire the scan + mapper over the source table, and the reducer over
        // the index table (IdentityTableReducer-style output of Put mutations).
        TableMapReduceUtil.initTableMapperJob("students2", scan, MyIndexMapper.class, Text.class, NullWritable.class, job);
        TableMapReduceUtil.initTableReducerJob("students2_index", MyIndexReducer.class, job);

        // Submit the job to the cluster and wait for it to finish.
        boolean b = job.waitForCompletion(true);
        if (b) {
            System.out.println("================== students2索引表构建成功！！！============================");
        } else {
            System.out.println("================== students2索引表构建失败！！！============================");
        }
        // Conventional driver exit code so shell scripts can detect failure.
        System.exit(b ? 0 : 1);
    }
}
