package yz.mr.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver (entry-point) class for the WordCount MapReduce job.
 *
 * <p>Configures the custom Mapper and Reducer classes, the intermediate and
 * final output key/value types, and the input/output paths, then submits the
 * job to the cluster and blocks until it finishes.
 */
public class WordCount {
    public static void main(String[] args) throws Exception {

        // 1-2. Create the configuration and the Job handle.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 3. Basic job metadata; setJarByClass lets Hadoop locate the jar
        //    containing this driver when shipping it to the cluster.
        job.setJobName("WordCount");
        job.setJarByClass(WordCount.class);

        // 4. Mapper and its intermediate (map-side) output types.
        //    NOTE(fix): the original called setOutputKeyClass/ValueClass here
        //    (duplicating step 6); the map output types must be declared with
        //    setMapOutputKeyClass/ValueClass so the shuffle serializes the
        //    intermediate records correctly if map and reduce types diverge.
        job.setMapperClass(WordCountMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 5. Reducer.
        job.setReducerClass(WordCountReducer.class);

        // 6. Final (reduce-side) output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 7. Input/output paths. Defaults preserve the original hard-coded
        //    cluster paths, but both can now be overridden from the command
        //    line:  hadoop jar wc.jar yz.mr.wordcount.WordCount <in> <out>
        //    The input path may be a single file or a directory.
        //    (For local runs, e.g.: src/main/dataFile/apiwolds.txt and
        //    output/wordcount.)
        String inputPath = args.length > 0 ? args[0] : "/input";
        String outputPath = args.length > 1 ? args[1] : "/output/wordcount";
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // 8. Submit and wait. Propagate success/failure to the shell via the
        //    exit code — the original discarded waitForCompletion's result,
        //    so the driver exited 0 even when the job failed.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
