package com.leadbank.bigdata.mapreduce.batch;

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

/**
 * Created by hp on 2018/5/14.
 */
public class BatchImportReducer extends
        TableReducer<LongWritable, Text, NullWritable> {
    protected void reduce(LongWritable key,
                          java.lang.Iterable<Text> values, Context context)
            throws java.io.IOException, InterruptedException {
        for (Text text : values) {
            final String[] splited = text.toString().split("\t");

            final Put put = new Put(Bytes.toBytes(splited[0]));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("date"),
                    Bytes.toBytes(splited[1]));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("msisdn"),
                    Bytes.toBytes(splited[2]));
            // 省略其他字段，调用put.add(....)即可
            context.write(NullWritable.get(), put);
        }
    };
}