package com.foton.bigdatastudy.combineText;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * @Author RenPu
 * @Date 2023/8/21 16:54
 * @Version 1.0
 * @Description: Driver — the bridge connecting the Mapper and the Reducer:
 * configures and submits the MapReduce job.
 **/
public class WordCountDriver {

    public static void main(String[] args) throws Exception {

        // 1: Obtain the Job instance from the configuration.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2: Set the jar by the driver class so the cluster can locate the job's code.
        job.setJarByClass(WordCountDriver.class);

        // 3: Wire up the Mapper and Reducer implementations.
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReduce.class);

        // 4: Set the MAP output key/value types.
        // BUG FIX: the original called setOutputValueClass here (duplicating step 5)
        // instead of setMapOutputValueClass, leaving the map-output value type unset.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 5: Set the FINAL (reducer) output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // If no InputFormat is set, TextInputFormat is the default: it splits and
        // reads each file individually, line by line.
        // CombineTextInputFormat packs many small files into fewer splits,
        // reducing the number of map tasks.
        job.setInputFormatClass(CombineTextInputFormat.class);

        // Set the maximum virtual split size to 4 MB (4 * 1024 * 1024 bytes).
        CombineTextInputFormat.setMaxInputSplitSize(job, 4194304);

        // 6: Set input and output paths. The output path must NOT already exist
        // on disk, otherwise the pre-run validation fails and the job aborts.
        FileInputFormat.setInputPaths(job, new Path("D:\\hadoop\\inputcombine"));
        FileOutputFormat.setOutputPath(job, new Path("D:\\hadoop\\combinetext1"));

        // 7: Submit the job and block until completion; exit 0 on success, 1 on failure.
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
