package com.nx.qwr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;

/**
 * @Author: Qingwr
 * @Date: 2022/8/14 21:38
 * @Description Requirement: sort the records by the second column in descending order.
 * Sample input:
 * 13480253104	41580	41580	2494800	2494800
 * 13502468823	790020	1413720	101663100	1529437140
 * 13560436666	249480	207900	15467760	13222440
 * 13560439658	457380	332640	28191240	81663120
 * 13602846565	207900	166320	26860680	40332600
 * 13660577991	332640	124740	96465600	9563400
 */
public class JobMain {

    /**
     * Driver entry point: configures and submits the "sortFlow" MapReduce job.
     *
     * @param args args[0] = HDFS input path, args[1] = HDFS output path
     *             (the output directory must not already exist)
     * @throws IOException            if job submission fails
     * @throws ClassNotFoundException if a job class cannot be resolved
     * @throws InterruptedException   if the wait for completion is interrupted
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: JobMain <input path> <output path>");
            System.exit(2);
        }

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "sortFlow");
        // Required when running from a jar on a cluster so task JVMs can
        // locate the mapper/reducer/bean classes.
        job.setJarByClass(JobMain.class);

        // Input: plain text lines read from args[0].
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path(args[0]));

        // Map stage: FlowBean is the map output key — presumably its
        // compareTo() implements the descending second-column order; confirm
        // against the FlowBean class.
        job.setMapperClass(FlowMapper.class);
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(NullWritable.class);

        // Reduce stage: emits the sorted beans with no value payload.
        job.setReducerClass(FlowReducer.class);
        job.setOutputKeyClass(FlowBean.class);
        job.setOutputValueClass(NullWritable.class);

        // Output: text files written under args[1].
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path(args[1]));

        // Propagate success/failure as the process exit code. The original
        // only printed 0/1 and always exited 0, so callers (shell scripts,
        // schedulers) could never detect a failed job.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
