package com.lagou.homework.sort;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

// 输入: 必须是mapper的输出
// 输出: k 排序编号  v 数字
/**
 * Reducer that assigns a 1-based rank to each number it receives.
 *
 * <p>Input must be the output of the corresponding mapper: key = the number
 * (as {@link LongWritable}), value = {@link NullWritable}. Because the
 * MapReduce framework sorts keys before they reach the reducer (default
 * shuffle behavior), keys arrive in ascending order, so the emitted rank
 * reflects the sorted position.</p>
 *
 * <p>Output: key = rank (1, 2, 3, ...), value = the number itself.
 * Duplicate numbers each receive their own rank because we iterate over
 * every value grouped under the key.</p>
 *
 * <p>NOTE(review): the running rank is shared instance state, so this is only
 * correct with a single reduce task — multiple reducers would each restart
 * numbering at 1. Presumably the job is configured with one reducer; confirm
 * in the driver.</p>
 */
public class SortFileNumReducer extends Reducer<LongWritable, NullWritable, LongWritable, LongWritable> {

    // Running rank across all reduce() calls. Declared long (not int) so it
    // cannot overflow before exceeding the 64-bit range of the output key.
    private long rank = 0L;

    // Reused output key to avoid allocating a new Writable per record.
    private final LongWritable index = new LongWritable();

    /**
     * Emits (rank, number) for every occurrence of {@code key}.
     *
     * @param key     the number being ranked; keys arrive in ascending order
     * @param values  one NullWritable per occurrence of the number; iterating
     *                them ensures duplicates are each counted and emitted
     * @param context Hadoop context used to write the (rank, number) pairs
     * @throws IOException          if the underlying write fails
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(LongWritable key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        for (NullWritable ignored : values) {
            rank++;
            index.set(rank);
            context.write(index, key);
        }
    }
}
