package com.shujia.advance;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Mapper over the source HBase table.
 * For each row it reads the rowkey (student id) and the {@code info:name} cell,
 * then emits a composite key {@code "<id>_<name>"} with a NullWritable value.
 * Rows that lack the {@code info:name} cell are skipped instead of producing
 * a bogus "id_null" key.
 */
class MyMapper extends TableMapper<Text, NullWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Mapper<ImmutableBytesWritable, Result, Text, NullWritable>.Context context) throws IOException, InterruptedException {
        // ImmutableBytesWritable key  -> the rowkey (student id)
        // Result value               -> all cells of this row (scan restricted to family "info")
        String id = Bytes.toString(key.get());

        // getValue returns null when the cell is absent; Bytes.toString(null) is null,
        // which would otherwise be concatenated as the literal string "null".
        byte[] nameBytes = value.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"));
        if (nameBytes == null) {
            return; // no name cell for this row — nothing to index
        }
        String name = Bytes.toString(nameBytes);

        // Emit <id_name, null>; the reducer splits this back apart.
        context.write(new Text(id + "_" + name), NullWritable.get());
    }
}

/**
 * Reducer that writes the secondary-index rows.
 * The incoming key is {@code "<id>_<name>"}; it is split back into id and name,
 * then written to the index table as: rowkey = name, column = info:<id>, value = "".
 */
class MyReducer extends TableReducer<Text, NullWritable, NullWritable> {
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Reducer<Text, NullWritable, NullWritable, Mutation>.Context context) throws IOException, InterruptedException {
        // Key format produced by the mapper: "<id>_<name>".
        String composite = key.toString();

        // Limit the split to 2 so a name that itself contains '_' stays intact
        // (an unbounded split("_") would silently truncate such names).
        String[] parts = composite.split("_", 2);
        if (parts.length < 2) {
            return; // malformed key (no separator) — skip rather than throw AIOOBE
        }
        String id = parts[0];
        String name = parts[1];

        // Index row: the name becomes the rowkey, the id becomes the qualifier;
        // the cell value carries no information, hence the empty byte value.
        Put put = new Put(Bytes.toBytes(name));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(id), Bytes.toBytes(""));

        context.write(NullWritable.get(), put);
    }
}


/**
 * Driver for the secondary-index MapReduce job.
 * Scans the "students_1" table (family "info") with {@link MyMapper} and
 * writes index rows into "student_index" via {@link MyReducer}.
 */
public class HBaseIndexMR {
    public static void main(String[] args) throws Exception {
        // Configuration pointing at the ZooKeeper ensemble that serves HBase.
        Configuration hbaseConf = new Configuration();
        hbaseConf.set("hbase.zookeeper.quorum", "hadoop102:2181,hadoop103:2181,hadoop104:2181");

        // Job setup: instance, display name, and the jar-locating class.
        Job indexJob = Job.getInstance(hbaseConf);
        indexJob.setJobName("根据学生表构建二级索引");
        indexJob.setJarByClass(HBaseIndexMR.class);

        // Restrict the scan to the "info" column family — the only data the mapper reads.
        Scan infoScan = new Scan();
        infoScan.addFamily(Bytes.toBytes("info"));

        // Wire the map side: source table, scan, mapper class, and its output key/value types.
        TableMapReduceUtil.initTableMapperJob(
                "students_1", infoScan, MyMapper.class, Text.class, NullWritable.class, indexJob);

        // Wire the reduce side: destination index table and reducer class.
        TableMapReduceUtil.initTableReducerJob("student_index", MyReducer.class, indexJob);

        // Submit and block until the job finishes, then report the outcome.
        String banner = "===============================================================";
        if (indexJob.waitForCompletion(true)) {
            System.out.println(banner);
            System.out.println("********   索引表生成成功！！请到Hbase表中查看   *********");
            System.out.println(banner);
        } else {
            System.out.println(banner);
            System.out.println("********   索引表生成失败！！  *********");
            System.out.println(banner);
        }
    }
}