package com.mano.demo.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.*;

/**
 * @Author: zj
 * @Description:
 * @Date: Created in 14:32 2020/9/9
 * @Modified By:
 */
public class WordCountMapReduceDemo extends Configured implements Tool {

    public static class Map extends MapReduceBase implements Mapper<LongWritable,Text,Text,IntWritable>{

        /** Counter group: total number of words emitted by this mapper. */
        static enum Counters { INPUT_WORDS }

        // Reusable output value — every token is emitted with count 1.
        private final static IntWritable one = new IntWritable(1);
        // Reusable output key, avoids allocating a Text per token.
        private Text word = new Text();
        // When false, lines are lower-cased before tokenizing.
        private boolean caseSensitive = true;
        // Regex patterns removed from each line before counting.
        private Set<String> patternToSkip = new HashSet<String>();

        private long numRecords = 0;
        private String inputFile;


        /**
         * Reads per-job settings: case sensitivity, the current input file
         * name, and — when "wordcount.skip.patterns" is set — the skip-pattern
         * files shipped via the DistributedCache.
         */
        @Override
        public void configure(JobConf job) {
            caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
            inputFile = job.get("map.input.file");
            if (job.getBoolean("wordcount.skip.patterns", false)) {
                Path[] patternsFiles;
                try {
                    patternsFiles = DistributedCache.getLocalCacheFiles(job);
                } catch (IOException e) {
                    // Best effort: without cache files we simply skip no patterns.
                    e.printStackTrace();
                    return;
                }
                // BUG FIX: getLocalCacheFiles may return null when nothing was
                // cached; the original would then NPE in the for-each below.
                if (patternsFiles != null) {
                    for (Path patternsFile : patternsFiles) {
                        parseSkipFile(patternsFile);
                    }
                }
            }
        }

        /**
         * Loads one skip-pattern file (one regex per line) into patternToSkip.
         */
        private void parseSkipFile(Path patternsFile) {
            // BUG FIX: try-with-resources closes the reader on every path;
            // the original never closed it and leaked the file handle.
            // FileNotFoundException is an IOException, so one catch suffices.
            try (BufferedReader reader = new BufferedReader(new FileReader(patternsFile.toString()))) {
                String pattern;
                while ((pattern = reader.readLine()) != null) {
                    patternToSkip.add(pattern);
                }
            } catch (IOException e) {
                // Best effort: an unreadable pattern file only means fewer
                // patterns get skipped.
                e.printStackTrace();
            }
        }

        /**
         * Emits (token, 1) for every whitespace-separated token of the line,
         * after optional lower-casing and removal of the skip patterns.
         */
        @Override
        public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {

            String line = caseSensitive ? value.toString() : value.toString().toLowerCase();
            for (String pattern : patternToSkip) {
                line = line.replaceAll(pattern, "");
            }

            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
                reporter.incrCounter(Counters.INPUT_WORDS, 1);
            }

            // Periodic status update so long-running tasks are not marked stalled.
            if ((++numRecords % 100) == 0) {
                reporter.setStatus("Finished processing "+numRecords+" records"+" from the input file:"+inputFile);
            }

        }

    }


    public static class Reduce extends MapReduceBase implements Reducer<Text,IntWritable,Text,IntWritable>{

        /**
         * Sums the partial counts gathered for one word and emits the total.
         * Associative and commutative, so it also serves as the combiner.
         */
        @Override
        public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
            int total = 0;
            for (; values.hasNext(); ) {
                total += values.next().get();
            }
            output.collect(key, new IntWritable(total));
        }
    }



    /**
     * Configures and submits the word-count job.
     *
     * Args are positional input/output paths, optionally interleaved with
     * "_skip &lt;patternFile&gt;" pairs naming pattern files to distribute via
     * the cache.
     *
     * @param args command-line arguments (after generic-option parsing)
     * @return 0 when the job completes successfully
     * @throws Exception if job submission or execution fails
     */
    @Override
    public int run(String[] args) throws Exception {
        JobConf conf = new JobConf(getConf(), WordCountMapReduceDemo.class);
        conf.setJobName("wordcount");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        conf.setMapperClass(Map.class);
        // The reducer only sums, so it doubles as the combiner.
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        List<String> other_args = new ArrayList<String>();
        // BUG FIX: the original looped over other_args.size() — other_args
        // starts empty, so no argument was ever examined and
        // other_args.get(0) below always threw IndexOutOfBoundsException.
        for (int i = 0; i < args.length; i++) {
            if("_skip".equals(args[i])){
                DistributedCache.addCacheFile(new Path(args[++i]).toUri(),conf);
                // BUG FIX: the mapper reads "wordcount.skip.patterns" (plural);
                // the original set the singular key, so skipping never engaged.
                conf.setBoolean("wordcount.skip.patterns",true);
            }else{
                other_args.add(args[i]);
            }
        }

        FileInputFormat.setInputPaths(conf,new Path(other_args.get(0)));
        FileOutputFormat.setOutputPath(conf,new Path(other_args.get(1)));

        JobClient.runJob(conf);

        return 0;

    }

    /**
     * CLI entry point. Delegates to ToolRunner so generic Hadoop options
     * (-D, -conf, -fs, ...) are parsed before {@link #run(String[])} runs.
     */
    public static void main(String[] args) {
        try {
            int ret = ToolRunner.run(new Configuration(),new WordCountMapReduceDemo(),args);
            System.exit(ret);
        } catch (Exception e) {
            // BUG FIX: the original printed the trace and fell through, so a
            // failed job still exited with status 0; report failure instead.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
