package org.example.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.slf4j.LoggerFactory;

import java.io.IOException;

/**
 * Driver for the "sorted number" MapReduce job.
 *
 * @author Lian Jingwei
 * @since 2020-07-11
 * @version 1.0
 */
public class Driver {

    public static void main(String[] args)  {
        try {
            // args[0]  输入路径的文件夹
            //args[1] 输出路径
            Configuration configuration = new Configuration();
            Job job = Job.getInstance(configuration, "sorted number");
            job.setJarByClass(Driver.class);
            // 指定 mapper
            job.setMapperClass(NumMapper.class);
            job.setMapOutputKeyClass(LongWritable.class);
            job.setMapOutputValueClass(NullWritable.class);
            // 指定 reducer
            job.setReducerClass(NumReducer.class);
            job.setOutputKeyClass(LongWritable.class);
            job.setOutputValueClass(LongWritable.class);
            // 指定 combiner
            //job.setCombinerClass(NumReducer.class);
            job.setNumReduceTasks(1);

            // 指定输入文件夹

          // job.setInputFormatClass(TextInputFormat.class);
           // FileInputFormat.setInputPaths(job, args[0]);
            FileInputFormat.setInputPaths(job,args[0]);

            // 指定 输出文件夹
            FileOutputFormat.setOutputPath(job,new Path("/tmp/out"));
            boolean flag = job.waitForCompletion(true);
            //jvm退出：正常退出0，非0值则是错误退出
            System.exit(flag ? 0 : 1);
        }catch (Exception e){
            LoggerFactory.getLogger(Driver.class).error(e.getMessage(),e);
        }
    }
}
