package com.wyl.hw;


import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReducer extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {

    /** Reusable output key that carries the current sequence number. */
    IntWritable number = new IntWritable();

    // Running counter kept as INSTANCE state (not a local) so the numbering
    // continues across every reduce() call — i.e. across all keys in this task.
    int num = 1;

    /**
     * Emits one (sequenceNumber, datum) pair per occurrence of {@code key}.
     * <p>
     * The shuffle delivers keys already sorted (ascending by default for
     * {@code IntWritable}), so the output is the sorted data with a 1-based
     * row number in the first column, as the assignment requires.
     *
     * @param key     one distinct input value from the map phase
     * @param values  one element per occurrence of {@code key}; only the
     *                count of elements matters, their contents are unused
     * @param context sink for the (sequence number, original value) output
     */
    @Override
    protected void reduce(IntWritable key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // One output row per occurrence, so duplicated inputs each get
        // their own sequence number.
        for (IntWritable occurrence : values) {
            number.set(num);
            num++;
            // Column 1: sequence number; column 2: the original datum.
            context.write(number, key);
        }
    }
}
