package mrdemo001;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// The four generic parameters <Text, IntWritable, Text, IntWritable> are the
// types of <k2, v2, k3, v3>. Note in particular that <k2, v2> here must match
// the output key/value types of the map function.
public class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

	// Reused across reduce() calls so we don't allocate a fresh IntWritable for
	// every key — a common Hadoop pattern to cut per-record garbage.
	private final IntWritable total = new IntWritable();

	/**
	 * Sums all occurrence counts emitted by the mappers for a single word and
	 * writes one {@code <k3, v3>} pair: the word itself and its total count.
	 *
	 * @param k2      the word (same key the mapper emitted)
	 * @param v2s     all counts collected for this word in the shuffle phase
	 * @param context sink for the {@code <k3, v3>} output pair
	 * @throws IOException          if writing the output fails
	 * @throws InterruptedException if the task is interrupted
	 */
	@Override
	protected void reduce(Text k2, Iterable<IntWritable> v2s,
			Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {

		int sum = 0;
		for (IntWritable count : v2s) {
			sum += count.get();
		}
		// k3 is the value of k2; v3 is the computed total for that word.
		total.set(sum);
		context.write(k2, total);
	}
}
