package mrdemo007;

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TopNReduce extends Reducer<IntWritable, IntWritable, Text, IntWritable> {

	/** Number of top values to emit; read from configuration key "N" (default 5). */
	int len;
	/**
	 * Ascending-sorted window of size len + 1. Slot 0 is the eviction slot:
	 * each new value overwrites it and the array is re-sorted, so after every
	 * add() the indices 1..len hold the current top-N values. Unfilled slots
	 * hold Integer.MIN_VALUE sentinels — zero-initialization would silently
	 * drop negative inputs (a negative value can never displace a 0).
	 */
	int[] topN;
	/**
	 * Count of values actually offered to the window, so cleanup() never
	 * emits untouched sentinel slots when fewer than len values were seen.
	 */
	private long seen;

	@Override
	protected void setup(Reducer<IntWritable, IntWritable, Text, IntWritable>.Context context)
			throws IOException, InterruptedException {
		len = context.getConfiguration().getInt("N", 5);
		// One extra slot (index 0) acts as the eviction slot in add().
		topN = new int[len + 1];
		// MIN_VALUE sentinels instead of the default 0 so negative inputs
		// are retained correctly. (A genuine input of Integer.MIN_VALUE is
		// indistinguishable from a sentinel, but 'seen' bounds the output.)
		Arrays.fill(topN, Integer.MIN_VALUE);
		seen = 0;
	}

	@Override
	protected void reduce(IntWritable k2, Iterable<IntWritable> v2s,
			Reducer<IntWritable, IntWritable, Text, IntWritable>.Context context)
			throws IOException, InterruptedException {
		// Equal values arrive as separate elements of v2s under the same key,
		// so every element must be offered to the window, not just the key.
		for (IntWritable item : v2s) {
			add(item.get());
		}
	}

	/**
	 * Offers num to the top-N window: overwrite the current smallest slot
	 * (index 0 — it is never part of the top-N), then re-sort ascending so
	 * indices 1..len again hold the largest values seen so far.
	 */
	private void add(int num) {
		topN[0] = num;
		Arrays.sort(topN);
		seen++;
	}

	@Override
	protected void cleanup(Reducer<IntWritable, IntWritable, Text, IntWritable>.Context context)
			throws IOException, InterruptedException {
		// Emit at most len rows, and never the sentinel slots when fewer
		// than len values were seen. Rank 1 (key) is the largest value.
		int emit = (int) Math.min(seen, (long) len);
		for (int i = len; i > len - emit; i--) {
			context.write(new Text(String.valueOf(len - i + 1)), new IntWritable(topN[i]));
		}
	}
}
