package com.wc;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Iterator;

//  Reducer<key_in, value_in, key_out, value_out>
//  key_in/value_in   — the key/value types received from the map-side output
//  key_out/value_out — the key/value types emitted as input to the reduce phase
//  This class performs a local (per-node, per-map-output) aggregation before shuffle.
public class WordCountCombine extends Reducer<Text, IntWritable, Text, IntWritable> {

    /**
     * Combiner step of the word-count job: locally sums the partial counts
     * produced by the map tasks on this node, so fewer records are shuffled
     * to the reducers. Output has the same schema as the map output, which
     * is required for a combiner.
     *
     * @param key     the word being counted
     * @param values  the partial counts emitted by the local map tasks
     * @param context Hadoop context used to emit the aggregated pair
     * @throws IOException          if the underlying write fails
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum all partial counts for this key. Enhanced for-loop replaces the
        // manual Iterator; commented-out debug prints removed.
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }

        // Emit the locally aggregated count; the reduce phase merges these
        // per-node totals into the final count.
        context.write(key, new IntWritable(sum));
    }
}