package mapred.topN;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.util.TreeMap;

public class TopNJob {
    static class MapTopNTask extends Mapper<LongWritable, Text, NullWritable, Text> {
        /** Fallback cut-off when "topn.n" is not set, matching the original hard-coded 10. */
        private static final int DEFAULT_N = 10;

        // Per-mapper sorted buffer: key = parsed integer, value = the raw input line.
        // TreeMap keeps entries ordered so the smallest key can be evicted cheaply.
        // NOTE: duplicate numeric values collapse to a single entry (same behavior
        // as the original implementation).
        private final TreeMap<Integer, String> tree = new TreeMap<Integer, String>();

        // Number of top entries to retain; read from the job configuration in setup().
        private int topN = DEFAULT_N;

        @Override
        protected void setup(Context context)
                throws IOException, InterruptedException {
            // Allow the cut-off to be tuned via -D topn.n=<k>; defaults to 10
            // so existing jobs keep their original behavior.
            topN = context.getConfiguration().getInt("topn.n", DEFAULT_N);
        }

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Parse each input line as an integer and buffer it. Malformed lines
            // are skipped (with a note on stderr) instead of crashing the task.
            try {
                tree.put(Integer.parseInt(value.toString()), value.toString());
                // Keep only the topN largest keys: once the buffer overflows,
                // evict the smallest entry. (A successful put is the only way
                // the buffer can grow, so the check lives inside the try.)
                if (tree.size() > topN) {
                    tree.remove(tree.firstKey());
                }
            } catch (NumberFormatException e) {
                System.err.println("TopNJob: skipping non-numeric input line: " + value);
            }
            // No output here — this mapper's top-N is emitted once, in cleanup().
        }

        @Override
        protected void cleanup(Context context)
                throws IOException, InterruptedException {
            // Emit the surviving entries in descending order. Every record uses
            // the NullWritable key so a single reduce group sees all candidates.
            for (String t : tree.descendingMap().values()) {
                context.write(NullWritable.get(), new Text(t));
            }
        }
    }

    static class TopNReudce extends Reducer<NullWritable, Text, NullWritable, Text> {
        /** Fallback cut-off when "topn.n" is not set, matching the original hard-coded 10. */
        private static final int DEFAULT_N = 10;

        // Sorted buffer of the global candidates; key = parsed integer,
        // value = the original text. Smallest key is evicted on overflow.
        private final TreeMap<Integer, String> tree = new TreeMap<Integer, String>();

        // Number of top entries to retain; read from the job configuration in setup().
        private int topN = DEFAULT_N;

        @Override
        protected void setup(Context context)
                throws IOException, InterruptedException {
            // Same key the mapper honors, so -D topn.n=<k> changes both sides.
            topN = context.getConfiguration().getInt("topn.n", DEFAULT_N);
        }

        @Override
        protected void reduce(NullWritable key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // All mapper outputs share the NullWritable key, so (with a single
            // reducer) this method runs exactly once over every candidate.
            for (Text text : values) {
                tree.put(Integer.parseInt(text.toString()), text.toString());
                // Evict the smallest entry whenever the buffer exceeds topN.
                if (tree.size() > topN) {
                    tree.remove(tree.firstKey());
                }
            }
            // Output is deferred to cleanup(), after all values are seen.
        }

        @Override
        protected void cleanup(Context context)
                throws IOException, InterruptedException {
            // Write the final top-N, largest value first.
            for (String t : tree.descendingMap().values()) {
                context.write(NullWritable.get(), new Text(t));
            }
        }
    }
    /**
     * Driver: parses generic Hadoop options, wires the top-N mapper/reducer,
     * and exits 0 on success, 1 on job failure, 2 on bad usage.
     */
    public static void main(String[] args) {
        try {
            Configuration conf = new Configuration();
            // Strip generic Hadoop options (-D, -fs, ...) so only <in> <out> remain.
            String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
            if (otherArgs.length != 2) {
                System.err.println("Usage: TopNJob <in> <out>");
                System.exit(2);
            }

            // BUG FIX: pass conf to Job.getInstance. The original called
            // Job.getInstance() with no arguments, silently discarding every
            // option GenericOptionsParser had just parsed into conf.
            Job job = Job.getInstance(conf, "TopN");
            job.setJarByClass(TopNJob.class);
            job.setMapperClass(MapTopNTask.class);
            job.setReducerClass(TopNReudce.class);
            // The global top-N is only correct if one reducer sees every
            // mapper's candidates, so pin the reducer count to 1.
            job.setNumReduceTasks(1);

            // Map output types happen to match the final output types, but are
            // set explicitly: without setMapOutputXXX they would be required to.
            job.setMapOutputKeyClass(NullWritable.class);
            job.setMapOutputValueClass(Text.class);
            job.setOutputKeyClass(NullWritable.class);
            job.setOutputValueClass(Text.class);

            FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
            FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            // The original swallowed IOExceptions and kept going with a null
            // job (guaranteed NPE). Fail fast with a nonzero exit instead.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
