package com.hw.mapreduce.service;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Reduce phase of the word-count job.
 *
 * The output of this phase is written to a text file on HDFS.
 *
 * Input:  KEY - Text (word), VALUE - LongWritable (partial count)
 * Output: KEY - Text (word), VALUE - LongWritable (total count)
 *
 * @author hw
 */
public class WordReduce extends Reducer<Text,LongWritable, Text,LongWritable> {

	private LongWritable num = new LongWritable();
	/**
	 * @author hw
	 * @Description
	 * MapReduce框架会将Map阶段缠上的相同key分发到同一个reduce函数处理
	 * @param key
	 * 第一个Text: 是传入的单词名称，是Mapper中传入的
	 * @param values
	 * 第二个：LongWritable 是同一个key的值的集合
	 * @param context
	 * 第三个Text: 是输出到文本中的内容
	 * @throws InterruptedException 
	 * @throws IOException 
	 */
	public void reduce(Text key,Iterable<LongWritable> values,Context context) throws IOException, InterruptedException{
		
		long count= 0;
		for(LongWritable v:values){
			count+=v.get();
		}
		num.set(count);
		context.write(key, num);
	}

}
