package com.tledu.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.util.Arrays;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MRPractice5 {
    public static class MRPractice5Mapper extends Mapper<Object, Text, Text, IntWritable> {

        // Reused output key to avoid allocating a Text per emitted token.
        private Text word = new Text();
        // Count of 1 for every word found in a regular input file.
        private IntWritable one = new IntWritable(1);
        // Sentinel value (-1) tagging a word that appears in a blacklist file;
        // the reducer suppresses any key that carries this marker.
        private IntWritable blackVal = new IntWritable(-1);

        /**
         * Emits (word, 1) for each comma-separated token of a regular input line,
         * or (word, -1) when the line comes from a blacklist file. A split is
         * treated as blacklist data when its file name starts with "black".
         */
        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // Distinguish blacklist data from regular data by the name of the
            // file this input split belongs to.
            String fileName = ((FileSplit) context.getInputSplit()).getPath().getName();
            if (fileName.startsWith("black")) {
                // Blacklist file: each line is one banned word. Trim stray
                // whitespace and skip blank lines so "foo " still blocks "foo"
                // and empty lines don't become phantom blacklist keys.
                String banned = value.toString().trim();
                if (!banned.isEmpty()) {
                    word.set(banned);
                    context.write(word, blackVal);
                }
            } else {
                // Regular file: split the line on commas, emit each word with count 1.
                StringTokenizer str = new StringTokenizer(value.toString(), ",");
                while (str.hasMoreTokens()) {
                    word.set(str.nextToken());
                    context.write(word, one);
                }
            }
        }
    }

    public static class MRPractice5Reducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        // Reused output value — standard Hadoop idiom to avoid allocating one
        // IntWritable per reduced key.
        private IntWritable result = new IntWritable();

        /**
         * Sums the counts for a word and emits (word, total). If any value is
         * the blacklist sentinel -1 (written by the mapper for blacklist
         * entries), the word is dropped without emitting anything.
         */
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                if (val.get() == -1) {
                    // Word is blacklisted — suppress it entirely.
                    return;
                }
                // Normal accumulation of per-mapper counts.
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }



    /**
     * Driver: wires the blacklist-aware word-count job together.
     *
     * Expects three non-generic arguments: a regular-words input path, a
     * blacklist input path (file names starting with "black"), and an output
     * directory that must not already exist.
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // Create the job configuration.
        Configuration conf = new Configuration();
        // Optional output compression could be enabled here, e.g. (modern key names):
        // conf.set("mapreduce.output.fileoutputformat.compress", "true");
        // conf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.GzipCodec");
        // GenericOptionsParser folds standard Hadoop options (-D, -files, ...) into
        // conf and hands back whatever is left — our input/output paths.
        GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
        String[] remainingArgs = optionParser.getRemainingArgs();
        // Fail fast with a usage message instead of an
        // ArrayIndexOutOfBoundsException when paths are missing.
        if (remainingArgs.length < 3) {
            System.err.println("Usage: MRPractice5 <words input> <blacklist input> <output dir>");
            System.exit(2);
        }
        System.out.println(Arrays.toString(args));
        System.out.println(Arrays.toString(remainingArgs));

        // Create the job.
        Job job = Job.getInstance(conf, "job021-kxr-wordcount");

        // Main class, mapper and reducer.
        job.setJarByClass(MRPractice5.class);
        job.setMapperClass(MRPractice5Mapper.class);
        // NOTE: deliberately no combiner — local aggregation could sum a word on
        // one mapper before the blacklist marker from another mapper reaches the
        // reducer, letting blacklisted words slip through.
        job.setReducerClass(MRPractice5Reducer.class);

        // Output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // First two paths are inputs (regular words + blacklist); third is the output dir.
        FileInputFormat.addInputPath(job, new Path(remainingArgs[0]));
        FileInputFormat.addInputPath(job, new Path(remainingArgs[1]));
        FileOutputFormat.setOutputPath(job, new Path(remainingArgs[2]));

        // Submit the job and wait for completion.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
