package com.allen.mapreduce.wordcount;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.*;

import org.apache.hadoop.conf.*;
// FIX: was "java.lang.module.Configuration" — an explicit import wins over the
// org.apache.hadoop.conf.* wildcard, so every use of Configuration in this file
// resolved to the wrong class. Hadoop's Configuration is what this driver needs.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.map.InverseMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class main {
    boolean exit = false;
    String skipfile = null; //stop-file path
    int min_num = 0;
    String tempDir = "wordcount-temp-" + Integer.toString(new Random().nextInt(Integer.MAX_VALUE));

    Configuration conf;

    //获取停词文件的路径。并放到DistributedCache中
	    for(int i=0;i<args.length;i++)
    {
        if("-skip".equals(args[i]))
        {
            DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
            System.out.println(args[i]);
        }
    }

    //获取要展示的最小词频
	    for(int i=0;i<args.length;i++)
    {
        if("-greater".equals(args[i])){
            min_num = Integer.parseInt(args[++i]);
            System.out.println(args[i]);
        }
    }

    //将最小词频值放到Configuration中共享
		conf.set("min_num", String.valueOf(min_num));	//set global parameter

		try{
        /**
         * run first-round to count
         * */
        Job job = new Job(conf, "jiq-wordcountjob-1");
        job.setJarByClass(WordCount.class);

        //set format of input-output
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        //set class of output's key-value of MAP
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        //set mapper and reducer
        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        //set path of input-output
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(tempDir));



        if(job.waitForCompletion(true)){
            /**
             * run two-round to sort
             * */
            //Configuration conf2 = new Configuration();
            Job job2 = new Job(conf, "jiq-wordcountjob-2");
            job2.setJarByClass(WordCount.class);

            //set format of input-output
            job2.setInputFormatClass(SequenceFileInputFormat.class);
            job2.setOutputFormatClass(TextOutputFormat.class);

            //set class of output's key-value
            job2.setOutputKeyClass(IntWritable.class);
            job2.setOutputValueClass(Text.class);

            //set mapper and reducer
            //InverseMapper作用是实现map()之后的数据对的key和value交换
            //将Reducer的个数限定为1, 终于输出的结果文件就是一个
            /**
             * 注意，这里将reduce的数目设置为1个。有非常大的文章。
             * 由于hadoop无法进行键的全局排序，仅仅能做一个reduce内部
             * 的本地排序。
             所以我们要想有一个依照键的全局的排序。
             * 最直接的方法就是设置reduce仅仅有一个。
             */
            job2.setMapperClass(InverseMapper.class);
            job2.setNumReduceTasks(1); //only one reducer

            //set path of input-output
            try {
                FileInputFormat.addInputPath(job2, new Path(tempDir));
            } catch (IOException ex) {
                throw new RuntimeException(ex);
            }
            FileOutputFormat.setOutputPath(job2, new Path(args[1]));

            /**
             * Hadoop 默认对 IntWritable 按升序排序，而我们须要的是按降序排列。
             * 因此我们实现了一个 IntWritableDecreasingComparator 类,　
             * 并指定使用这个自己定义的 Comparator 类对输出结果中的 key (词频)进行排序
             * */
            job2.setSortComparatorClass(IntWritableDecreasingComparator.class);
            try {
                exit = job2.waitForCompletion(true);
            } catch (IOException ex) {
                throw new RuntimeException(ex);
            } catch (InterruptedException ex) {
                throw new RuntimeException(ex);
            } catch (ClassNotFoundException ex) {
                throw new RuntimeException(ex);
            }
        }
    }catch(Exception e){
        e.printStackTrace();
    }finally{

        try {
            //delete tempt dir
            FileSystem.get(conf).deleteOnExit(new Path(tempDir));
            if(exit) System.exit(1);
            System.exit(0);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public main() {
        conf = new Configuration();
    }
}
