package com.kkb.hbase.mr.demo5GroupComparator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class OrderMain extends Configured implements Tool {

    /**
     * Driver for the order grouping MapReduce job: assembles the eight
     * MapReduce stages on a {@link Job} and submits it.
     *
     * Key/value types through the pipeline:
     * k1 : LongWritable  (byte offset of the input line)
     * v1 : Text          (the input line itself)
     * k2 : OrderBean
     * v2 : NullWritable
     * k3 : OrderBean
     * v3 : NullWritable
     *
     * @param args args[0] = input path, args[1] = output path (must not exist yet)
     * @return 0 if the job completed successfully, 1 otherwise
     * @throws Exception if job configuration or execution fails
     */
    @Override
    public int run(String[] args) throws Exception {
        // Get the Configuration injected by ToolRunner (includes -D overrides).
        Configuration conf = super.getConf();
        Job job = Job.getInstance(conf, "OrderMaxPriseOut");

        // Optional for local runs; on a cluster this lets the framework
        // locate the jar that contains this driver class.
        job.setJarByClass(OrderMain.class);

        // 1. Read the input file and parse it into key/value pairs.
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path(args[0]));

        // 2. Custom map logic: converts (k1, v1) into (k2, v2).
        job.setMapperClass(GroupMapper.class);
        // Map-side output key/value types (k2, v2).
        job.setMapOutputKeyClass(OrderBean.class);
        job.setMapOutputValueClass(NullWritable.class);

        // 3. Partitioning via the custom partitioner.
        job.setPartitionerClass(OrderPartitioner.class);

        // 4. Sorting — implemented inside OrderBean (its compareTo).

        // 5. Combiner (would run the reduce logic once per map task);
        //    intentionally disabled for this job.
        //job.setCombinerClass(HxkReducer.class);

        // 6. Grouping via the custom grouping comparator.
        job.setGroupingComparatorClass(OrderGroup.class);

        // 7. Custom reduce logic, with the final (k3, v3) output types.
        job.setReducerClass(GroupReducer.class);
        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        // 8. Write the final result as text.
        job.setOutputFormatClass(TextOutputFormat.class);
        // NOTE: the output path must not already exist, or the job fails.
        TextOutputFormat.setOutputPath(job, new Path(args[1]));

        // Submit the job and block until it finishes; true means success.
        boolean succeeded = job.waitForCompletion(true);
        return succeeded ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();

        // Fair Scheduler queue-placement scenarios being exercised:
        //
        // Case 1: leave mapreduce.job.queuename unset
        //   -> matched by <rule name="primaryGroup" create="false" />
        //
        // Case 2 (active here): set an existing queue explicitly
        //   -> matched by <rule name="specified" create="false"/>
        configuration.set("mapreduce.job.queuename", "hadoop");
        //
        // Case 3: set a non-existent queue, e.g. "hadoopv1", and comment out
        //   <rule name="primaryGroup" create="false" /> in the allocation file
        //   (then refresh with: yarn rmadmin -refreshQueues)
        //   -> matched by <rule name="default" queue="root.default"/>

        // BUG FIX: previously this passed `new Configuration()`, which
        // silently discarded the queue-name setting made above. Pass the
        // configured instance so the queue selection actually takes effect.
        int exitCode = ToolRunner.run(configuration, new OrderMain(), args);
        System.exit(exitCode);
    }

}
