package org.hyf.inspur.LessonDesin.clear.count3;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.hyf.inspur.LessonDesin.clear.tools.TProperties;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;

public class TopNReducer extends Reducer<Text, Text, NullWritable, Text> {
    // Reused output value; set() avoids allocating a new Text per emitted record.
    private Text result = new Text();
    // Column index (within the split input line) of the value to rank by, e.g. the PV count.
    private int topvalue;
    // Number of top records to emit; defaults to 5 when the job does not set "topn".
    private int topn;

    /**
     * Reads job parameters from the configuration.
     * "topvalue" (required): column index of the sort value in each input line.
     * "topn" (optional): how many top records to keep; defaults to 5.
     */
    @Override
    protected void setup(Reducer<Text, Text, NullWritable, Text>.Context context)
            throws IOException, InterruptedException {
        topvalue = Integer.parseInt(context.getConfiguration().get("topvalue"));
        // The original comment documented a default of 5, but parseInt(get("topn"))
        // would throw when the key is absent; getInt honors the documented default.
        topn = context.getConfiguration().getInt("topn", 5);
    }

    /**
     * Keeps the topn records with the largest sort value and writes each one with
     * its rank appended (the record with the largest value gets rank 1).
     *
     * Bug fix: the original used TreeMap&lt;Integer, String&gt;, so two records with
     * the same count overwrote each other and ties were silently dropped before
     * the size cap was applied. Records are now grouped per count in a list.
     */
    @Override
    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // count -> records carrying that count, iterated in ascending count order.
        TreeMap<Integer, List<String>> topMap = new TreeMap<Integer, List<String>>();
        int kept = 0;
        for (Text val : values) {
            String line = val.toString();
            String[] fields = line.split(TProperties.getValue("fileoutsplit"));
            int count = Integer.parseInt(fields[topvalue]);
            List<String> group = topMap.get(count);
            if (group == null) {
                group = new ArrayList<String>();
                topMap.put(count, group);
            }
            group.add(line);
            kept++;
            // Once we exceed topn, evict one record with the smallest count.
            if (kept > topn) {
                List<String> smallest = topMap.firstEntry().getValue();
                smallest.remove(smallest.size() - 1);
                if (smallest.isEmpty()) {
                    topMap.remove(topMap.firstKey());
                }
                kept--;
            }
        }
        // Flatten in ascending count order so ranks can be assigned by counting down.
        List<String> ordered = new ArrayList<String>();
        for (List<String> group : topMap.values()) {
            ordered.addAll(group);
        }
        // Largest count comes last in the flattened list and receives rank 1.
        int rank = ordered.size() + 1;
        for (String s : ordered) {
            rank--;
            // Output line: behavior ID, user ID, PV (visit count), rank.
            result.set(s + TProperties.getValue("outfilesplit") + rank);
            context.write(NullWritable.get(), result);
        }
    }
}

