package mapred.distinctData;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
public class DistinctJob {

    /**
     * Mapper: emits each input line (the record value) as the output key with a
     * {@link NullWritable} value. The shuffle phase groups identical keys, so all
     * duplicate lines collapse into a single key on the reduce side — that grouping
     * is what performs the de-duplication.
     */
    static class MapDistinctTask extends Mapper<LongWritable, Text, Text, NullWritable> {

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The line text becomes the key; the map-output value is unused,
            // so NullWritable keeps the shuffle payload minimal.
            context.write(value, NullWritable.get());
        }
    }

    /**
     * Reducer: each distinct line arrives exactly once as a key after the shuffle.
     * It is written out as the output value with a {@link NullWritable} key, so the
     * job's output contains only the unique line text.
     */
    static class ReduceDistinctTask extends Reducer<Text, NullWritable, NullWritable, Text> {
        @Override
        protected void reduce(Text key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            // Emit the distinct line once; the grouped values are irrelevant.
            context.write(NullWritable.get(), key);
        }
    }

    /**
     * Driver: parses generic Hadoop options, configures and submits the job.
     * Exit codes: 0 on success, 1 on job failure, 2 on bad usage.
     *
     * @param args command-line arguments; after generic-option parsing, exactly
     *             two remain: the input path and the output path
     * @throws Exception if job setup or submission fails — the standard Hadoop
     *                   driver pattern lets the JVM print the stack trace and
     *                   exit non-zero instead of swallowing the error
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: DistinctJob <in> <out>");
            System.exit(2);
        }

        // Pass the parsed Configuration to the job: the original called
        // Job.getInstance() with no arguments, which creates a fresh
        // Configuration and silently discards every -D/generic option
        // applied by GenericOptionsParser above.
        Job job = Job.getInstance(conf, "DistinctJob");
        job.setJarByClass(DistinctJob.class);

        job.setMapperClass(MapDistinctTask.class);
        job.setReducerClass(ReduceDistinctTask.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
