package club.drguo.hadoop.mapreduce.topkurl;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TopKeyURLReducer extends Reducer<Text, FlowBean, Text, LongWritable> {
	// One map per reduce task, sorted by FlowBean's natural ordering.
	// Declared as a field (not inside reduce()) so a single TreeMap survives
	// every reduce() call instead of being recreated per URL.
	//
	// The value is a List because distinct URLs can have equal total flow:
	// with the former TreeMap<FlowBean, Text>, a second put() with a bean
	// that compares equal silently overwrote (dropped) the earlier URL.
	private final TreeMap<FlowBean, List<Text>> treeMap = new TreeMap<>();
	// Running grand total of all URLs' flow; used for the 80% cutoff in cleanup().
	private double globalCount = 0;

	/**
	 * Sums the up/down flow of all beans for one URL and records
	 * {@code totalFlow -> url} for the final selection in {@link #cleanup}.
	 *
	 * Input shape: {@code <url, {bean, bean, ...}>}.
	 */
	@Override
	protected void reduce(Text key, Iterable<FlowBean> values, Context context)
			throws IOException, InterruptedException {
		// Copy the key: Hadoop reuses the same Text instance across calls,
		// so storing `key` directly would alias every entry to the last URL.
		Text url = new Text(key.toString());
		long upSum = 0;
		long downSum = 0;
		for (FlowBean bean : values) {
			upSum += bean.getUp_flow();
			downSum += bean.getDown_flow();
		}
		FlowBean total = new FlowBean(upSum, downSum);
		globalCount += total.getSum_flow();
		// Group URLs whose total flow compares equal instead of overwriting.
		treeMap.computeIfAbsent(total, k -> new ArrayList<>()).add(url);
	}

	/**
	 * Called once just before the reduce task exits: writes URLs in FlowBean
	 * order until the emitted flow reaches 80% of the global total.
	 *
	 * NOTE(review): assumes FlowBean's compareTo() sorts by descending flow
	 * so the heaviest URLs are written first — confirm against FlowBean.
	 */
	@Override
	protected void cleanup(Reducer<Text, FlowBean, Text, LongWritable>.Context context)
			throws IOException, InterruptedException {
		if (globalCount == 0) {
			// No input (or all-zero flow): 0/0 below would be NaN, which fails
			// the < 0.8 test and silently skipped everything. Make it explicit.
			return;
		}
		double tempCount = 0;
		for (Entry<FlowBean, List<Text>> ent : treeMap.entrySet()) {
			long sumFlow = ent.getKey().getSum_flow();
			for (Text url : ent.getValue()) {
				// Stop once the written flow reaches 80% of the total.
				if (tempCount / globalCount >= 0.8) {
					return;
				}
				context.write(url, new LongWritable(sumFlow));
				tempCount += sumFlow;
			}
		}
	}
}
