package cn.tedu.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

//1. Extend Reducer
/**
 * Sums the per-word counts emitted by the mapper.
 * The Reducer's KEYIN/VALUEIN match the Mapper's KEYOUT/VALUEOUT;
 * the output is each word and its total occurrence count,
 * i.e. Text and IntWritable.
 * @author waqwb
 *
 */
public class WordCountReducer
		extends Reducer<Text,IntWritable,Text,IntWritable>{

	// Reusable output value: reduce() runs once per distinct key, and
	// Hadoop Writables are designed to be mutated and reused, so we avoid
	// allocating a fresh IntWritable for every key.
	private final IntWritable result = new IntWritable();

	//2. Override reduce
	@Override
	protected void reduce(Text key, Iterable<IntWritable> values,
						  Reducer<Text, IntWritable, Text, IntWritable>.Context context)
			throws IOException, InterruptedException {
		//3. Accumulator for this key's total count
		int sum = 0;
		//4. Iterate over the partial counts and sum them
		for (IntWritable count : values) {
			//5. Add this partial count to the total
			sum += count.get();
		}
		//6. Emit (word, total count)
		result.set(sum);
		context.write(key, result);
	}
}




