package com.atguigu.mapreduce.sorting;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * 提交job
 */
/**
 * Driver: configures and submits the flow-count/sort MapReduce job.
 *
 * <p>Usage: {@code FlowCountDriver [inputPath outputPath]}. When no arguments
 * are supplied, the hard-coded local test paths below are used as defaults,
 * so existing no-arg invocations keep working.
 */
public class FlowCountDriver {

    /** Default input file, used when no CLI arguments are supplied. */
    private static final String DEFAULT_INPUT =
            "C:\\major\\SGG大数据线下全套资料2025\\05.尚硅谷大数据技术之Hadoop\\02.资料\\07_测试数据\\phone_data\\phone_data.txt";

    /** Default output directory, used when no CLI arguments are supplied. */
    private static final String DEFAULT_OUTPUT =
            "C:\\major\\SGG大数据线下全套资料2025\\05.尚硅谷大数据技术之Hadoop\\02.资料\\07_测试数据\\phone_data\\out3";

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // Prefer CLI arguments (inputPath outputPath); fall back to the defaults above.
        String inputPath = args.length >= 2 ? args[0] : DEFAULT_INPUT;
        String outputPath = args.length >= 2 ? args[1] : DEFAULT_OUTPUT;

        // 1. Hadoop configuration.
        Configuration configuration = new Configuration();
        // 2. Create the job.
        Job job = Job.getInstance(configuration);
        // The driver class is used to locate the job jar on the cluster.
        job.setJarByClass(FlowCountDriver.class);
        // 3. Mapper and reducer classes.
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReducer.class);
        // 4. Map-phase output key/value types (FlowBean is the key so records
        //    are sorted by flow during the shuffle).
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);
        // Reduce-phase (final) output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // Partitioning: 5 reduce tasks; the custom partitioner must route every
        // key into one of exactly 5 partitions.
        job.setNumReduceTasks(5);
        job.setPartitionerClass(FlowPartitioner.class);

        // Custom sort comparator — when set, it overrides the key class's
        // natural ordering for the shuffle sort.
        job.setSortComparatorClass(FlowWritableComparator.class);

        // Input file and output directory (the output directory must not exist yet,
        // or FileOutputFormat will fail the job at submission time).
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Submit and block until completion. BUG FIX: the original discarded
        // waitForCompletion's boolean result, so the JVM always exited 0 even
        // when the job failed; propagate success/failure as the exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
