package com.deng.hadoop.mapreduce;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Created By DengLibin
 * Date: 19-4-1
 * Time: 上午9:35
 * Desc: 单词计数reducer,reducer的输入就是map的输出
 * Shuffle: Reducer copies the sorted output from each  Mapper using HTTP across the network
 *          Reducer 通过网络使用 HTTP 协议获取每个 Mapper 的输出。
 * Sort: The framework merge sorts Reducer inputs by key (Since different Mapper may have output the same key)
            The shuffle and sort phases occur simultaneously i.e. while outputs are
            being fetched they are merged.
         框架会对reducer的输入按照key进行排序（可能会有相同的key）,Shuffle和Sort阶段是同时进行的。

 shuffle的过程分析：http://www.cnblogs.com/ahu-lichang/p/6665242.html
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Reusable output value holder (the per-word total). Hadoop convention:
    // reuse a single Writable instance instead of allocating one per reduce call.
    private final IntWritable result = new IntWritable(0);

    /**
     * Sums the partial counts emitted by the mappers for one word and writes
     * the word together with its total count to the job output.
     *
     * <p>The framework groups all values sharing the same key and invokes this
     * method once per distinct word.
     *
     * @param key     the word being counted
     * @param values  the partial counts for this word from all mappers
     * @param context output sink for the (word, total) pair
     * @throws IOException          on output write failure
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int total = 0;
        for (IntWritable partial : values) {
            total += partial.get();
        }
        result.set(total);
        context.write(key, result);
    }
}
