package com.bigdata.hbase_mr.write;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Mapper that counts students per class.
 *
 * Input key: {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable} — the HBase row key.
 * Input value: {@link org.apache.hadoop.hbase.client.Result} — one scanned row.
 * Output: (clazz, 1) pairs, so the reducer can sum the number of students in each class.
 */
public class ClazzMapper extends TableMapper<Text, IntWritable> {

    // Column family / qualifier bytes, built once with Bytes.toBytes (fixed
    // charset) instead of String.getBytes() (platform-default charset) on
    // every row.
    private static final byte[] CF_INFO = Bytes.toBytes("info");
    private static final byte[] COL_NAME = Bytes.toBytes("name");
    private static final byte[] COL_AGE = Bytes.toBytes("age");
    private static final byte[] COL_GENDER = Bytes.toBytes("gender");
    private static final byte[] COL_CLAZZ = Bytes.toBytes("clazz");

    // Reused output objects (standard Hadoop idiom: Writables are mutable
    // reference types, so one instance can be refilled per record instead of
    // allocating a new pair for every row).
    private final Text mkey = new Text();
    private final IntWritable mval = new IntWritable(1);

    /**
     * Emits (clazz, 1) for each scanned row. Rows that have no info:clazz
     * cell are skipped rather than throwing a NullPointerException.
     *
     * @param key     the HBase row key
     * @param result  the scanned row
     * @param context MapReduce context used to emit the output pair
     */
    @Override
    protected void map(ImmutableBytesWritable key, Result result, Mapper<ImmutableBytesWritable, Result, Text, IntWritable>.Context context) throws IOException, InterruptedException {

        processResult(result);

        // Bytes.toString returns null when the cell is absent; Text.set(null)
        // would throw an NPE, so guard before emitting.
        String clazz = Bytes.toString(result.getValue(CF_INFO, COL_CLAZZ));
        if (clazz == null) {
            return;
        }
        mkey.set(clazz);
        context.write(mkey, mval);
    }

    /**
     * Debug helper: prints the info-family cells of one row to stdout.
     *
     * @param result one scanned HBase row
     */
    private static void processResult(Result result) {
        // Row key of the scanned row.
        String rowkey = Bytes.toString(result.getRow());

        // Bytes.toString (unlike new String(byte[])) tolerates a missing
        // cell: it returns null instead of throwing an NPE.
        String name = Bytes.toString(result.getValue(CF_INFO, COL_NAME));
        String age = Bytes.toString(result.getValue(CF_INFO, COL_AGE));
        String gender = Bytes.toString(result.getValue(CF_INFO, COL_GENDER));
        String clazz = Bytes.toString(result.getValue(CF_INFO, COL_CLAZZ));

        System.out.println("id:" + rowkey + "---name:" + name + "---age:" + age
                + "---gender:" + gender + "---clazz:" + clazz);
    }
}