package com.example.hadoop.mapreduce.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Driver for the WordCount MapReduce job — effectively a YARN cluster client.
 *
 * <p>Packages the job's runtime parameters and submits them to the cluster:
 * the jar containing the job classes, the mapper and reducer classes, the
 * mapper's intermediate key/value types, the job's final output key/value
 * types, the input data path, and the output path.
 *
 * <p>Usage: {@code WordCountDriver <input-path> <output-path>}; when fewer
 * than two arguments are given, built-in default HDFS paths are used.
 */
public class WordCountDriver {
    public static void main(String[] args) throws Exception {
        // Fall back to default paths when fewer than two args are supplied.
        // (The original check only handled length == 0, so a single argument
        // caused an ArrayIndexOutOfBoundsException at args[1] below.)
        if (args == null || args.length < 2) {
            // "hdfs://" scheme added: the bare "shizhan:9000/..." form is
            // parsed with "shizhan" as the URI scheme, which is invalid.
            args = new String[]{
                    "hdfs://shizhan:9000/wordcount/input",
                    "hdfs://shizhan:9000/wordcount/output"
            };
        }

        // Load default Hadoop configuration (core-site.xml etc. from classpath).
        Configuration conf = new Configuration();

        // Create the job and tell Hadoop which jar to ship by locating the
        // jar that contains this driver class via its class loader.
        Job job = Job.getInstance(conf);
        job.setJarByClass(WordCountDriver.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);

        // Intermediate (mapper output) key/value types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Final (reducer output) key/value types.
        // NOTE(review): mapper values are IntWritable but the final value type
        // is LongWritable — confirm WordCountReducer really emits LongWritable;
        // otherwise these two value types should match.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Submit the job (configuration plus the job jar) to YARN and block
        // until completion, printing progress; exit non-zero on failure so
        // shell callers can detect it.
        boolean successfully = job.waitForCompletion(true);
        System.exit(successfully ? 0 : 1);
    }
}
