package com.itheima.partition;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

// Simple MapReduce driver: configures and submits the lottery ("CaiPiao") job,
// wiring a custom mapper, partitioner and reducer together.
public class CaiPiaoMain {
    public static void main(String[] args) throws Exception{
        // Fail fast with a clear usage message instead of an
        // ArrayIndexOutOfBoundsException when paths are missing.
        if (args.length < 2) {
            System.err.println("Usage: CaiPiaoMain <input path> <output path>");
            System.exit(1);
        }

        // 1. Create the job object with a fresh configuration.
        Job job = Job.getInstance(new Configuration(), "彩票MR");

        // Required when running on a cluster so Hadoop can locate the
        // jar containing this driver and its mapper/reducer classes.
        job.setJarByClass(CaiPiaoMain.class);

        // 2.1 Input format and input path (first CLI argument).
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path(args[0]));

        // 2.2 Mapper class and its output (k2, v2) types.
        job.setMapperClass(CaiPiaoMapperTask.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);

        // 2.3 Shuffle: custom partitioner routing map output to reducers.
        job.setPartitionerClass(MyPartitioner.class);

        // 2.4-2.6 Sort, combine and grouping: Hadoop defaults are used.

        // 2.7 Reducer class and final output (k3, v3) types.
        job.setReducerClass(CaiPiaoReducerTask.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // 2.8 Output format and output path (second CLI argument).
        //     Note: the output directory must not already exist.
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path(args[1]));

        // 3. The custom partitioner produces 2 partitions, so exactly
        //    2 reduce tasks are required to consume them all.
        job.setNumReduceTasks(2);

        // Submit the job and block until completion, printing progress.
        boolean success = job.waitForCompletion(true);

        // Conventional exit code: 0 on success, 1 on failure.
        System.exit(success ? 0 : 1);
    }
}
