package main;

import comparator.TextDemoComparator;
import mapper.HadoopMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import reducer.HadoopReducer;

/**
 * HadoopMain
 *
 * @author zhangyimin
 * @version 1.0
 * @date 2018-07-20 上午10:04
 */

public class HadoopMain {

    /**
     * Driver for a word-count-style MapReduce job: wires {@code HadoopMapper}
     * and {@code HadoopReducer} with {@code Text} keys and {@code LongWritable}
     * values, applies {@code TextDemoComparator} for descending sort order,
     * and runs the job against the given input/output paths.
     *
     * @param args {@code args[0]} = input path, {@code args[1]} = output path
     *             (output directory must not already exist — Hadoop convention)
     * @throws Exception if job configuration or submission fails
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: HadoopMain <input path> <output path>");
            System.exit(2);
        }

        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        job.setJarByClass(HadoopMain.class);
        job.setMapperClass(HadoopMapper.class);
//        Enable map-side combining:
//        job.setCombinerClass(HadoopReducer.class);
//        Enable custom partitioning:
//        job.setPartitionerClass(EmployeePartition.class);
//        Use 3 partitions — passes 3 as the partitioner's numPartitions argument:
//        job.setNumReduceTasks(3);
        job.setReducerClass(HadoopReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // Sort in descending order (Hadoop's default shuffle sort is ascending).
        job.setSortComparatorClass(TextDemoComparator.class);
        // Bind the job's input and output locations.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Propagate job success/failure to the process exit code so callers
        // (shell scripts, schedulers) can detect a failed job; the original
        // ignored the boolean and always exited 0.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
