package yz.mr.partitioner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Requirement:
 *   Read the output of the ReduceJoin job, wrap each record into a student
 *   object, and have the reducer write the objects out to multiple files,
 *   partitioned by age — one output file per age.
 *
 * Usage: PartitionDriver [inputPath] [outputPath]
 *   Defaults to the local paths "output/ReduceCount" and "output/PartitionDriver"
 *   when no arguments are given.
 */
public class PartitionDriver {
    public static void main(String[] args) throws Exception {
        // Allow paths to be overridden on the command line; fall back to the
        // original hard-coded local paths for backward compatibility.
        String inputPath = args.length > 0 ? args[0] : "output/ReduceCount";
        String outputPath = args.length > 1 ? args[1] : "output/PartitionDriver";

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJobName("PartitionDriver");
        job.setJarByClass(PartitionDriver.class);

        // Mapper output key/value types must match the reducer's input types.
        job.setMapperClass(PartitionDriverMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(StudentText.class);

        // Custom partitioner: routes each record to a reducer by age.
        job.setPartitionerClass(MyPartition.class);

        job.setReducerClass(PartitionDriverReduce.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(StudentText.class);

        // Multiple reduce tasks — must be >= the number of partitions that
        // MyPartition can return, or records would be routed to a missing task.
        job.setNumReduceTasks(4);

        // Input may be a single file or a directory; the output directory
        // must not already exist or the job will fail at submission.
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Submit the job and block until completion. Propagate success/failure
        // through the exit code — the original ignored the boolean result, so
        // a failed job still exited 0.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
