package wordcount;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.map.InverseMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class WordCountDriver {

    /**
     * Driver for a two-stage MapReduce pipeline:
     * job 1 counts words (word -> count), writing a SequenceFile so the typed
     * (Text, IntWritable) pairs can be read back directly; job 2 swaps each
     * pair with {@link InverseMapper} and sorts by count in descending order
     * via the project's IntWritableDecreasingComparator.
     *
     * Exits with code 0 when both jobs succeed, 1 when either fails.
     *
     * @param args ignored — paths are rebuilt below from the class name
     * @throws IOException            on job setup/submission I/O failure
     * @throws ClassNotFoundException if a job class cannot be resolved
     * @throws InterruptedException   if waiting for a job is interrupted
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        // Derive the per-program temp directory name from the class itself.
        // The original read StackTraceElement.getFileName(), which the JDK
        // documents as nullable (absent debug info) and would NPE on split();
        // getSimpleName() yields the same "WordCountDriver" string reliably.
        String programName = WordCountDriver.class.getSimpleName();

        // Input/output paths — adjust to the local machine's layout.
        // NOTE: these deliberately replace any command-line arguments.
        String inputPath = "d:/temp/" + programName + "/input";
        String outputPath = "d:/temp/" + programName + "/output";
        String sortOutputPath = "d:/temp/" + programName + "/sortoutput";
        args = new String[] { inputPath, outputPath, sortOutputPath };

        // 1. Obtain configuration and create the word-count job.
        Configuration configuration = new Configuration();
        Job job1 = Job.getInstance(configuration);

        // 2. Set the jar location so the cluster can locate the job classes.
        job1.setJarByClass(WordCountDriver.class);

        // 3. Mapper and reducer classes.
        job1.setMapperClass(WordcountMapper.class);
        job1.setReducerClass(WordcountReducer.class);

        // 4. Map output key/value types.
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(IntWritable.class);

        // 5. Final output key/value types.
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(IntWritable.class);

        // 6. Input/output paths. SequenceFile output lets the sort job read
        //    the (Text, IntWritable) records back without re-parsing text.
        FileInputFormat.setInputPaths(job1, new Path(args[0]));
        FileOutputFormat.setOutputPath(job1, new Path(args[1]));
        job1.setOutputFormatClass(SequenceFileOutputFormat.class);

        // 7. Run the count job; only start the sort job if it succeeded.
        boolean result = job1.waitForCompletion(true);
        if (result) {
            Job sortJob = Job.getInstance(configuration);
            sortJob.setJarByClass(WordCountDriver.class);

            FileInputFormat.addInputPath(sortJob, new Path(outputPath));
            sortJob.setInputFormatClass(SequenceFileInputFormat.class);

            // InverseMapper (provided by the Hadoop library) swaps each
            // (word, count) pair to (count, word) so the shuffle sorts by count.
            sortJob.setMapperClass(InverseMapper.class);
            // A single reducer produces one globally sorted output file.
            sortJob.setNumReduceTasks(1);
            FileOutputFormat.setOutputPath(sortJob, new Path(args[2]));

            sortJob.setOutputKeyClass(IntWritable.class);
            sortJob.setOutputValueClass(Text.class);

            // Hadoop sorts IntWritable keys ascending by default; the custom
            // IntWritableDecreasingComparator orders counts descending instead.
            sortJob.setSortComparatorClass(IntWritableDecreasingComparator.class);

            System.exit(sortJob.waitForCompletion(true) ? 0 : 1);
        } else {
            System.exit(1);
        }
    }
}

