package data_manipulate;

/*
> original data file is arranged as :
a 5
b 4
c 74
d 78
e 1
r 64
f 4
> we want to sort the entries by the number in the second column, in descending order
 */

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


import java.io.IOException;
import java.net.URI;
import java.util.StringTokenizer;

public class Sort2 {

    // HDFS location of the raw "letter number" input records.
    public static String inputPath = "hdfs://localhost:9000/sort/sort2";
    // HDFS location the job writes results to; deleted and recreated on each run.
    public static String outputPath = "hdfs://localhost:9000/sort/sort_output2";

    /**
     * Job driver: validates the HDFS input/output paths, wires up the
     * mapper (emits (number, letter)), the descending sort comparator,
     * the range partitioner, and the reducer (swaps back to
     * (letter, number)), then runs the job to completion.
     *
     * @param args unused; paths come from {@code inputPath}/{@code outputPath}
     * @throws IOException            on HDFS access failure
     * @throws InterruptedException   if the job wait is interrupted
     * @throws ClassNotFoundException if a job class cannot be loaded
     */
    public static void main(String[] args)
            throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(inputPath), conf);
        // Fail fast when there is nothing to sort.
        if (!fs.exists(new Path(inputPath))) {
            System.out.println("source file does not exist!");
            System.out.println("now exiting ...");
            System.exit(1);
        }
        // Hadoop refuses to start if the output path exists, so remove it first.
        if (fs.exists(new Path(outputPath))) {
            System.out.println("output file already exists!");
            System.out.println("now deleting this output file ...");
            fs.delete(new Path(outputPath), true);
        }
        Job job = Job.getInstance(conf, "sort with second row");

        job.setJarByClass(Sort2.class);
        // Map output is keyed by the numeric column so the shuffle sorts on it.
        job.setMapperClass(doMapper.class);
        job.setMapOutputValueClass(Text.class);
        job.setMapOutputKeyClass(IntWritable.class);

        // Reducer swaps key/value back to the original column order.
        job.setReducerClass(doReducer.class);
        job.setOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Descending key order plus a range partitioner keeps the output
        // globally sorted even with multiple reducers.
        job.setSortComparatorClass(doComparator.class);
        job.setPartitionerClass(doPartitioner.class);
        // Start the job and propagate its success/failure as the exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Shuffle-phase sort comparator that inverts the natural IntWritable
     * ordering, so keys reach the reducer from largest to smallest.
     */
    public static class doComparator extends WritableComparator {

        // Register IntWritable as the key type; 'true' asks the parent to
        // instantiate keys for deserialized comparison.
        public doComparator() {
            super(IntWritable.class, true);
        }

        // Descending order: delegate with the operands swapped.
        @Override
        public int compare(WritableComparable a, WritableComparable b) {
            return super.compare(b, a);
        }
    }

    /**
     * Range partitioner that sends larger keys to lower-numbered partitions,
     * so the per-reducer output files concatenate into one descending list.
     *
     * <p>Keys at or above {@code maxValue} go to partition 0; smaller keys
     * are spread across the remaining partitions by value range, highest
     * range first.
     *
     * <p>NOTE(review): this relies on {@code key.hashCode()} equaling the
     * numeric key value, which holds for IntWritable — confirm if the key
     * type ever changes.
     */
    public static class doPartitioner<K, V> extends Partitioner<K, V> {
        @Override
        public int getPartition(K key, V value, int numReduceTasks) {
            // Keys >= maxValue all belong in the first partition.
            int maxValue = 50;
            // With a single reducer, or a large key, no range math is needed.
            if (numReduceTasks <= 1 || key.hashCode() >= maxValue) {
                return 0;
            }
            // Width of the value range assigned to each remaining reducer.
            // Guard against 0 (when numReduceTasks - 1 > maxValue): the old
            // counting loop spun forever in that case.
            int sectionValue = Math.max(1, maxValue / (numReduceTasks - 1));
            // Closed form of the original while-loop: number of full sections
            // strictly below this key. Clamped at 0 so negative keys (which
            // previously also landed at count 0) stay in range.
            int count = Math.max(0, (key.hashCode() - 1) / sectionValue);
            // Higher count (larger key) -> lower partition index; clamp so a
            // key near maxValue can never produce a negative partition.
            return Math.max(0, numReduceTasks - 1 - count);
        }
    }

    /**
     * Tokenizes each "letter number" input line and emits (number, letter),
     * keying by the numeric column so the shuffle sorts on it.
     */
    public static class doMapper extends Mapper<LongWritable, Text, IntWritable, Text> {

        // Reused output holders — standard Hadoop pattern to avoid
        // allocating per record.
        private IntWritable num = new IntWritable();
        private Text word = new Text();

        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, IntWritable, Text>.Context context)
                throws IOException, InterruptedException {
            // Split on any whitespace run so tabs or doubled spaces still
            // parse (split(" ") produced empty tokens and wrong indices).
            String[] splits = value.toString().trim().split("\\s+");
            // Skip blank or malformed lines instead of crashing the task
            // with ArrayIndexOutOfBoundsException.
            if (splits.length < 2) {
                return;
            }
            word.set(splits[0]);
            num.set(Integer.parseInt(splits[1]));
            context.write(num, word);
        }
    }

    /**
     * Restores the original (letter, number) orientation: the map stage
     * keyed records by number for sorting, so every grouped value is
     * written back out paired with its numeric key.
     */
    public static class doReducer extends Reducer<IntWritable, Text, Text, IntWritable> {
        @Override
        protected void reduce(IntWritable key, Iterable<Text> values, Reducer<IntWritable, Text, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            // Emit each value with the shared numeric key, swapped into
            // (letter, number) order.
            java.util.Iterator<Text> letters = values.iterator();
            while (letters.hasNext()) {
                context.write(letters.next(), key);
            }
        }
    }


}
