package mapred.filters.grep;

import java.io.File;
import java.io.IOException;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class GrepFilter {

    public static class GrepMapper extends Mapper<Object, Text, NullWritable, Text> {
        private String matchGrep = null;
        Pattern pattern;

        public void map(Object key, Text value, Context context) {

            // 对于每一行的输入，我们构建了一个正则对象的匹配器
            Matcher matcher = pattern.matcher(value.toString());
            // 匹配器会告诉开发人员，当前字符串与创建匹配器的Pattern是否符合
            // 如果匹配，则将当前行的值放入map的输出中
            if (matcher.find()) {
                try {
                    context.write(NullWritable.get(), value);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }

        protected void setup(Context context) {
            // matchGrep来自job启动之前的配置
            matchGrep = context.getConfiguration().get("matchGrep");
            // 正则表达式经过编译以后，才能发挥它的性能
            pattern = Pattern.compile(matchGrep);
        }
    }

    public static class GrepReducer extends Reducer<NullWritable, Text, NullWritable, Text> {
        private Random random = new Random();
        private Double percentage;

        public void reduce(NullWritable key, Iterable<Text> values, Context context) {
            String strPercentage = context.getConfiguration().get("filter_percentage");
            percentage = Double.valueOf(strPercentage);

            for (Text value : values) {
// 方式一：不考虑filter_percentage参数
                try {
                    context.write(NullWritable.get(), value);
                } catch (Exception e) {
                    e.printStackTrace();
                }

//                double rand = random.nextDouble();
//                if (rand < percentage) {
//                    try {
//                context.write(NullWritable.get(), value);
//                    } catch (Exception e) {
//                        e.printStackTrace();
//                    }
//            }
            }
        }

    }

    /**
     * Configures and submits the Grep job.
     *
     * <p>Arguments: {@code <grep-pattern> <input-path> <output-path>}.
     * The output path is deleted recursively before submission so reruns do not
     * fail with "output directory already exists".
     *
     * @throws IOException            if job submission or output cleanup fails
     * @throws ClassNotFoundException if a job class cannot be resolved
     * @throws InterruptedException   if the job wait is interrupted
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        if (args.length != 3) {
            System.err.println(
                    "!!!!!!!!!!!!!! Usage!!!!!!!!!!!!!!: \nhadoop jar <jar-name> "
                            + "mapred.filters.grep.GrepFilter "
                            + "<grep-pattern> "
                            + "<input-path> "
                            + "<output-path>");
            System.exit(1);
        }
        Configuration conf = new Configuration();

        // Regex consumed by GrepMapper.setup().
        conf.set("matchGrep", args[0]);
        // Sampling fraction read by GrepReducer; unused by the active pass-through path.
        conf.setDouble("filter_percentage", 0.1);
        Job job = Job.getInstance(conf, "Grep");

        job.setJarByClass(GrepFilter.class);

        job.setMapperClass(GrepMapper.class);
        // GrepReducer is a pure pass-through, so reusing it as a combiner is safe.
        job.setCombinerClass(GrepReducer.class);
        job.setReducerClass(GrepReducer.class);

        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(args[1]));

        // Delete the output path through the Hadoop FileSystem API so cleanup works
        // on HDFS as well as the local filesystem. The original
        // FileUtil.fullyDelete(new File(args[2])) only ever touched local disk,
        // leaving HDFS runs to fail with "output directory already exists".
        Path outputPath = new Path(args[2]);
        FileSystem fs = outputPath.getFileSystem(conf);
        fs.delete(outputPath, true);
        FileOutputFormat.setOutputPath(job, outputPath);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
