package com.tasks.task01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class NumberSortDrive {
  /**
   * Configures and submits the number-sort MapReduce job, blocking until it finishes.
   *
   * @param inputPaths comma-separated input path(s) to be read by the mappers
   * @param outputPath output directory for the job (must not already exist)
   * @return {@code true} if the job completed successfully, {@code false} otherwise
   * @throws IOException if job submission or path handling fails
   * @throws ClassNotFoundException if a configured job class cannot be loaded
   * @throws InterruptedException if the wait for job completion is interrupted
   */
  public boolean Start(String inputPaths, String outputPath)
      throws IOException, ClassNotFoundException, InterruptedException {
    // Build the Hadoop configuration.
    Configuration config = new Configuration();
    // NOTE(review): "user" is not a standard Hadoop configuration key — if the intent
    // is to submit the job as root, the conventional mechanism is the "user.name"
    // property or the HADOOP_USER_NAME environment variable; confirm before changing.
    config.set("user", "root");
    // Create the job from the configuration.
    Job job = Job.getInstance(config);
    // Locate the jar containing this driver so the cluster can ship it to task nodes.
    job.setJarByClass(NumberSortDrive.class);
    // Wire up the map and reduce implementations.
    job.setMapperClass(NumberSortMapper.class);
    job.setReducerClass(NumberSortReducer.class);
    // Map output: the number itself is the key and NullWritable a placeholder value —
    // the shuffle's sort on the key performs the actual ordering.
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    // Reduce output types.
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(IntWritable.class);
    // With a single reducer there is only one partition, so the partitioner is
    // redundant; it is set explicitly here for clarity. Removing it yields the
    // same result.
    job.setPartitionerClass(NumberSortPartition.class);
    // Explicitly use one reducer (the default) so the output is a single,
    // globally sorted file.
    job.setNumReduceTasks(1);
    // Configure the input and output paths.
    FileInputFormat.setInputPaths(job, new Path(inputPaths));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));
    // Submit and block until completion. Return the outcome instead of calling
    // System.exit(): exiting here would kill the JVM of any caller (tests, other
    // drivers). A main() wrapper can map this result to a process exit code.
    return job.waitForCompletion(true);
  }
}
