package com.bclz.cat.uniquekey;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

/**
 * Driver for a MapReduce job that computes the TopN cats (by count) from cat
 * data records of the form [id,name,count], where each record has a unique key.
 *
 * Strategy: each mapper keeps only its local TopN, and the reduce phase merges
 * those per-map TopN lists into the global TopN.
 */
public class CatApplication extends Configured implements Tool {

    /** Default TopN reported when the optional third argument is omitted. */
    private static final int DEFAULT_TOP_N = 10;

    private static final Logger log = Logger.getLogger(CatApplication.class);

    /**
     * Configures and submits the TopN job.
     *
     * @param args args[0] = input directory, args[1] = output directory,
     *             args[2] (optional) = TopN, a positive integer
     * @return 0 if the job succeeded, 1 otherwise
     * @throws IllegalArgumentException if the TopN argument is not positive
     * @throws NumberFormatException    if the TopN argument is not an integer
     * @throws Exception                if job submission or execution fails
     */
    @Override
    public int run(String[] args) throws Exception {

        Configuration conf = getConf();
        if (args.length >= 3) {
            // Fail fast with a clear message if TopN is malformed, instead of
            // letting the mapper/reducer blow up deep inside the job.
            int topN = Integer.parseInt(args[2]);
            if (topN <= 0) {
                throw new IllegalArgumentException("TopN must be positive, got: " + topN);
            }
            conf.set("hadoop.top.n", args[2]);
        }
        Job job = Job.getInstance(conf, "CAT STATISTIC TOPN");

        job.setJarByClass(getClass());
        job.setMapperClass(CatMapper.class);
        job.setReducerClass(CatReducer.class);

        // NullWritable map-output keys send every per-map TopN record to the
        // same reducer, which performs the final merge.
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    /**
     * Entry point. Usage: CatApplication [inputdir] [outputdir] {TopN}
     *
     * NOTE(review): this length check runs before ToolRunner parses generic
     * options (-D, -files, ...), so such options are counted as positional
     * arguments here — confirm callers do not rely on passing them.
     */
    public static void main(String[] args) throws Exception {

        if (args.length < 2) {
            throw new IllegalArgumentException("Usage: CatApplication [inputdir] [outputdir] {TopN}");
        }

        log.info(String.format("Input Path:%s \n Output Path:%s \n TOPN: %s",
                args[0], args[1], args.length > 2 ? args[2] : DEFAULT_TOP_N));

        int code = ToolRunner.run(new CatApplication(), args);

        System.exit(code);
    }
}
