package com.shujia.wyh.jinjie;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;


//Extend TableMapper: only the output key/value types need to be declared
//(the input key/value are fixed by HBase as ImmutableBytesWritable/Result).
class MyIndexMapper extends TableMapper<Text, NullWritable> {
    /**
     * Emits one "rowkey_name" key per row of the source table.
     * key   - the row's unique identifier (rowkey)
     * value - the whole row, which may contain many columns
     * The map logic runs once per row.
     */
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Mapper<ImmutableBytesWritable, Result, Text, NullWritable>.Context context) throws IOException, InterruptedException {
        String id = Bytes.toString(key.get());
        // byte[] getValue(byte[] family, byte[] qualifier) — fetch info:name
        byte[] nameBytes = value.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"));
        // Rows missing the info:name cell would otherwise produce "id_null"
        // keys (Bytes.toString(null) == null) and pollute the index — skip them.
        if (nameBytes == null) {
            return;
        }
        String name = Bytes.toString(nameBytes);

        String newId = id + "_" + name;
        context.write(new Text(newId), NullWritable.get());
    }
}

//TableReducer<KEYIN, VALUEIN, KEYOUT>: the output value type is fixed as Mutation.
class MyIndexReducer extends TableReducer<Text, NullWritable, NullWritable> {
    /**
     * Turns each "rowkey_name" key into a Put on the index table:
     * rowkey = name, one column per original rowkey (id), empty cell value.
     */
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Reducer<Text, NullWritable, NullWritable, Mutation>.Context context) throws IOException, InterruptedException {
        //Convert back to a Java String first
        String s = key.toString();
        //Split on the FIRST underscore only (limit 2): the original split("_")
        //truncated any name that itself contained '_' (e.g. "1_li_lei" lost "_lei").
        String[] parts = s.split("_", 2);
        //Defensive: a malformed key without a delimiter would otherwise throw
        //ArrayIndexOutOfBoundsException and fail the whole job.
        if (parts.length < 2) {
            return;
        }
        String id = parts[0];
        String name = parts[1];

        //The index table is an ordinary HBase table; writes are built with Put.
        Put put = new Put(Bytes.toBytes(name));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(id), Bytes.toBytes(""));

        context.write(NullWritable.get(), put);
    }
}


/*
    Build a secondary index table with MapReduce:
    reads students (rowkey = id, column info:name) and writes
    students_index (rowkey = name, one column per id).
 */
public class HBaseIndexDemo {
    public static void main(String[] args) throws Exception {
        //Create the configuration
        Configuration conf = new Configuration();
        //Point the client at the ZooKeeper quorum of the HBase cluster
        conf.set("hbase.zookeeper.quorum", "master:2181,node1:2181,node2:2181");

        //Create the MapReduce job
        Job job = Job.getInstance(conf);

        //Set the job name
        job.setJobName("对学生表students构建二级索引");
        //Set the main class (needed so the jar can be located on the cluster)
        job.setJarByClass(HBaseIndexDemo.class);

        //Scan describing how the source table is read
        Scan scan = new Scan();
        //Only the column family we actually need
        scan.addFamily(Bytes.toBytes("info"));
        //Recommended settings for MR over HBase (HBase reference guide):
        //batch more rows per RPC and don't pollute the region server block
        //cache with a one-off full scan.
        scan.setCaching(500);
        scan.setCacheBlocks(false);

        //Use the HBase-provided helper to read the source table.
        //initTableMapperJob(String table, Scan scan, Class<? extends TableMapper> mapper,
        //      Class<?> outputKeyClass,
        //      Class<?> outputValueClass, Job job)
        TableMapReduceUtil.initTableMapperJob("students", scan, MyIndexMapper.class, Text.class, NullWritable.class, job);

        //Wire the reducer to the target index table
        TableMapReduceUtil.initTableReducerJob("students_index", MyIndexReducer.class, job);

        //Submit the job and wait for completion
        boolean b = job.waitForCompletion(true);
        if (b) {
            System.out.println("=====================================================================");
            System.out.println("students_index单列索引表构建成功！！！！请在Hbase中查看！");
            System.out.println("=====================================================================");
        } else {
            System.out.println("=====================================================================");
            System.out.println("students_index单列索引表构建失败！！！！");
            System.out.println("=====================================================================");
        }
        //Propagate success/failure through the process exit code so shells
        //and schedulers (Oozie, Airflow, ...) can detect a failed build.
        System.exit(b ? 0 : 1);
    }
}
