package com.my.mr.NumSort;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Reducer that assigns an ascending rank number to each value.
 *
 * <p>Input keys arrive already sorted by the shuffle phase, so this reducer
 * only has to emit a running counter as the output key alongside the original
 * number as the output value. NOTE: correct global ranking assumes a single
 * reducer task — with multiple reducers each task keeps its own counter.
 */
public class NumSortReduce extends Reducer<IntWritable, NullWritable, IntWritable, IntWritable> {
    // Output key: the rank (1-based position) of the number.
    private final IntWritable resultKey = new IntWritable();
    // Running rank counter; persists across reduce() calls within this reducer task.
    private int count = 0;

    /**
     * Emits (rank, number) for every occurrence of {@code key}.
     *
     * @param key     a number, delivered in sorted order by the shuffle phase
     * @param values  one NullWritable per occurrence of the number in the input;
     *                iterating preserves duplicates, each getting its own rank
     * @param context MapReduce context used to emit (rank, number) pairs
     */
    @Override
    protected void reduce(IntWritable key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // Duplicate keys are grouped into one call; loop over values so each
        // duplicate occurrence is emitted and consumes its own rank.
        for (NullWritable ignored : values) {
            count++;
            resultKey.set(count);
            // Output: key = rank, value = the original number.
            context.write(resultKey, key);
        }
    }
}
