package com.foton.bigdatastudy.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.springframework.asm.SpringAsmInfo;

import java.io.IOException;

/**
 * @Author RenPu
 * @Date 2023/8/21 16:54
 * @Version 1.0
 * @Description: driver: mapper和reduce联系的桥梁
 **/
/**
 * Driver that wires the WordCount mapper and reducer into a MapReduce job
 * and submits it to the framework.
 */
public class WordCountDriver {

    /**
     * Configures and runs the WordCount job, then exits with status 0 on
     * success or 1 on failure.
     *
     * @param args unused; input/output paths are currently hard-coded below
     * @throws Exception if job setup or execution fails
     */
    public static void main(String[] args) throws Exception {

        // 1: Obtain the job instance from a fresh configuration.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2: Tell Hadoop which jar to ship by locating this driver class.
        job.setJarByClass(WordCountDriver.class);

        // 3: Associate the mapper and reducer implementations.
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReduce.class);

        // 4: Declare the mapper's output key/value types.
        // BUG FIX: the original called setOutputValueClass here (the job-level
        // setter, duplicating step 5) instead of setMapOutputValueClass.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 5: Declare the final (reducer) output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 6: Set input and output paths. The output directory must NOT
        // already exist on disk, otherwise the pre-run check fails and the
        // job aborts.
        FileInputFormat.setInputPaths(job, new Path("D:\\hadoop\\inputword"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\hadoop\\output2222"));

        // 7: Submit the job and block until it completes (verbose = true
        // prints progress to the console).
        boolean result = job.waitForCompletion(true);

        System.exit(result ? 0 : 1);
    }
}
