package com.topn2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;


/**
 * Driver for the TopN MapReduce job.
 *
 * <p>Wires together the mapper, reducer, sort/grouping comparators and
 * partitioner, then submits the job and exits with a non-zero status on
 * failure. Expects two positional arguments (after generic Hadoop options):
 * the input path and the output path. The output path is deleted first if it
 * already exists so reruns do not fail.
 */
public class MyTopN {

    public static void main(String[] args) throws Exception {

        // 1. Load configuration from the classpath resources.
        Configuration conf = new Configuration(true);

        // mapreduce.framework.name defaults to "yarn" (cluster mode) in the
        // config files; force local mode for this driver.
        conf.set("mapreduce.framework.name", "local");
        // Allow cross-platform submission (e.g. from Windows to a Linux cluster).
        conf.set("mapreduce.app-submission.cross-platform", "true");

        GenericOptionsParser parser = new GenericOptionsParser(conf, args);
        String[] othargs = parser.getRemainingArgs();

        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (othargs.length < 2) {
            System.err.println("Usage: MyTopN <input path> <output path>");
            System.exit(2);
        }

        // 2. Create the Job from the configuration.
        Job job = Job.getInstance(conf);
        // 3. Set the jar-by-class and job name.
        job.setJarByClass(MyTopN.class);
        job.setJobName("MyTopN2");

        // 4. Set input and output paths.
        Path inPath = new Path(othargs[0]);
        Path outPath = new Path(othargs[1]);
        TextInputFormat.addInputPath(job, inPath);

        // Delete the output path if it exists; Hadoop refuses to overwrite it.
        if (outPath.getFileSystem(conf).exists(outPath)) {
            outPath.getFileSystem(conf).delete(outPath, true);
        }
        TextOutputFormat.setOutputPath(job, outPath);

        // 5. Set the mapper class and its output key/value types.
        job.setMapperClass(TMapper.class);
        job.setMapOutputKeyClass(Tkey.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 6. Set the sort comparator, partitioner, and grouping comparator.
        job.setSortComparatorClass(TSortComparator.class);
        job.setPartitionerClass(TPartitioner.class);
        job.setGroupingComparatorClass(TGroupingComparator.class);

        // 7. Set the reducer class and the job's final output key/value types.
        job.setReducerClass(TReducer.class);
        job.setOutputKeyClass(Text.class);
        // BUG FIX: the original repeated setMapOutputValueClass here, leaving
        // the reducer output value class unset; this is the intended call.
        job.setOutputValueClass(IntWritable.class);

        // 8. Submit the job and surface failure through the exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
