package yz.mr.CombineFile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * MapReduce driver demonstrating how {@link CombineTextInputFormat} packs many
 * small input files into fewer input splits.
 *
 * <p>Observed on the sample data (per the JobSubmitter "number of splits" log):
 * 7 splits with the default TextInputFormat, 4 splits once combining is enabled.
 *
 * <p>Note: the class/job name intentionally keeps the original spelling
 * "CombineFIle" to match the sibling {@code CombineFIleMap}/{@code CombineFIleReduce}
 * classes referenced below.
 */
public class CombineFIle {
    public static void main(String[] args) throws Exception {

        // 1. Create the configuration and job objects.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Identify the job and the jar containing this driver class.
        job.setJobName("CombineFIle");
        job.setJarByClass(CombineFIle.class);

        // 3. Mapper and its intermediate (map-output) key/value types.
        job.setMapperClass(CombineFIleMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 4. Reducer.
        job.setReducerClass(CombineFIleReduce.class);

        // 5. Final output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 6. Enable small-file combining. This call is REQUIRED for
        //    setMaxInputSplitSize() below to take effect: the default
        //    TextInputFormat ignores that setting entirely.
        job.setInputFormatClass(CombineTextInputFormat.class);

        // Maximum combined split size: 3 MiB (3 * 1024 * 1024 bytes).
        // Lowering this value produces more (smaller) splits.
        CombineTextInputFormat.setMaxInputSplitSize(job, 3145728L);

        // 7. Local filesystem input/output paths for this demo.
        //    NOTE(review): the output directory must not already exist,
        //    or FileOutputFormat will fail the job at submission.
        FileInputFormat.addInputPath(job, new Path("src/main/dataFile/Words"));
        FileOutputFormat.setOutputPath(job, new Path("output/Combine3"));

        // 8. Submit the job, wait for it, and propagate success/failure
        //    through the process exit code (previously the boolean result
        //    was discarded, so a failed job still exited with status 0).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
