package cn.ekgc.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Makes this program support the standard Hadoop shell options by using
 * Hadoop's driver utilities:
 *
 * - Tool: the interface that defines the run() contract for a MapReduce driver.
 * - Configured: provides read/write access to the Hadoop Configuration object.
 * - ToolRunner.run: a static helper that parses generic options, initializes
 *   and submits the job, and waits for the result.
 */
public class WordCount extends Configured implements Tool {

    /**
     * MapReduce program entry point.
     *
     * @param args generic Hadoop options followed by the input and output paths
     * @throws Exception if job configuration or submission fails
     */
    public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options (-D, -conf, -fs, ...) and hands
        // the remaining arguments to run().
        int flag = ToolRunner.run(new WordCount(), args);

        System.out.println("exit with code " + flag);
        // Propagate the job result as the JVM exit status so shell scripts
        // and schedulers can detect failure; printing alone always exits 0.
        System.exit(flag);
    }

    /**
     * Configures and submits the word-count job.
     *
     * @param args args[0] = input path, args[1] = output path
     * @return 0 on success, 1 on job failure or bad usage
     * @throws Exception on HDFS access or job submission errors
     */
    @Override
    public int run(String[] args) throws Exception {
        // Guard against missing arguments instead of failing later with
        // an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: WordCount <input path> <output path>");
            return 1;
        }

        // Define the job.
        Job job = Job.getInstance(this.getConf(), "word count example");
        // Lets Hadoop locate the jar that contains this driver class.
        job.setJarByClass(WordCount.class);

        // Wire up map, combine (local pre-aggregation) and reduce phases.
        job.setMapperClass(MyMapper.class);
        job.setCombinerClass(MyReduce.class);
        job.setReducerClass(MyReduce.class);
        job.setPartitionerClass(MyPartitioner.class);
        job.setSortComparatorClass(MySort.class);
        //job.setCombinerKeyGroupingComparatorClass(MySort.class);
        //job.setGroupingComparatorClass(MySort.class);

        // Output types; these double as the map output types because no
        // separate map output types are configured.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // Three reducers to match the three partitions MyPartitioner produces.
        job.setNumReduceTasks(3);

        // Input directory to process.
        FileInputFormat.addInputPath(job, new Path(args[0]));

        // Delete a pre-existing output directory; Hadoop refuses to run if
        // the output path already exists.
        Path out = new Path(args[1]);
        FileSystem fileSystem = FileSystem.get(this.getConf());
        if (fileSystem.exists(out)) {
            fileSystem.delete(out, true);
        }
        // Set the output directory for the job's results.
        FileOutputFormat.setOutputPath(job, out);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    /**
     * Tokenizes each input line and emits (word, 1) pairs.
     *
     * Hadoop provides its own serializable wrappers for Java types to
     * optimize serialization: Long -> LongWritable, String -> Text, ...
     */
    public static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
        // Reused across map() calls to avoid allocating per record.
        private Text outKey = new Text();
        private LongWritable outValue = new LongWritable(1);

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split on runs of whitespace, e.g.
            // "hadoop is a good framework       for big data"
            String[] words = value.toString().split("\\s+");
            for (String word : words) {
                // split() yields an empty leading token when the line starts
                // with whitespace — don't count it as a word.
                if (word.isEmpty()) {
                    continue;
                }
                // key(word) value(1) out
                outKey.set(word);
                context.write(outKey, outValue); // eg: hadoop 1
            }
        }
    }

    /**
     * Sums the counts for each word; also registered as the combiner, which
     * is safe because summation is associative and commutative.
     */
    public static class MyReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
        // Reused output wrapper to avoid per-key allocation.
        private LongWritable outValue = new LongWritable();

        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context)
                throws IOException, InterruptedException {
            // Accumulate in a long: the values are LongWritable and an int
            // accumulator could silently overflow on large inputs.
            long sum = 0;
            for (LongWritable value : values) {
                sum += value.get();
            }
            outValue.set(sum);
            context.write(key, outValue);
        }
    }

    /**
     * Routes each word to one of three reducers by word length:
     * length 4 -> partition 0, length 6 -> partition 1, everything else -> 2.
     */
    public static class MyPartitioner extends Partitioner<Text, LongWritable> {
        @Override
        public int getPartition(Text word, LongWritable count, int numPartitions) {
            // Compute the length once instead of calling toString() twice.
            int len = word.toString().length();
            switch (len) {
                case 4:
                    return 0;
                case 6:
                    return 1;
                default:
                    return 2;
            }
        }
    }


    public static class MySort extends WritableComparator {
        public MySort() {
            super(Text.class);
        }

        public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
            int n1 = WritableUtils.decodeVIntSize(b1[s1]);
            int n2 = WritableUtils.decodeVIntSize(b2[s2]);
            return -compareBytes(b1, s1 + n1, l1 - n1, b2, s2 + n2, l2 - n2);
        }
    }

    /*public static class Person implements WritableComparable<Person> {
        private String name;
        private Integer age;

        @Override
        public int compareTo(Person o) {
            return 0;
        }

        @Override
        public void write(DataOutput dataOutput) throws IOException {

        }

        @Override
        public void readFields(DataInput dataInput) throws IOException {

        }
    }*/


}
