package com.tledu.mr;

import com.tledu.hdfs.HdfsUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class WhiteList {
    /**
     * Map output — key: name, value: reimbursement amount.
     * <p>
     * Reduce output — key: name, value: total reimbursement amount.
     */

    /**
     * Mapper that keeps only records whose first field (the name) appears in a
     * whitelist distributed through the job {@code Configuration}.
     *
     * <p>Input record format (space separated): {@code <name> <amount>}.
     * Emits {@code (name, amount)} for whitelisted names; silently skips
     * malformed or non-whitelisted records.
     */
    public static class WhiteListMapper extends Mapper<Object, Text, Text, IntWritable> {

        private Set<String> whiteList;

        // Reused output objects — standard MapReduce idiom to avoid
        // allocating a new writable per record.
        private final Text word = new Text();
        private final IntWritable val = new IntWritable();

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // Runs once per mapper instance; load the whitelist that the driver
            // stored in the Configuration under the "whiteList" key.
            String whiteListStr = context.getConfiguration().get("whiteList");
            if (whiteListStr == null) {
                // Fail fast with a clear message instead of an opaque NPE below.
                throw new IOException("Missing required configuration entry: whiteList");
            }
            // Normalize Windows line endings (\r\n) to \n before splitting,
            // since the whitelist file may have been authored on Windows.
            String[] whiteListArr = whiteListStr.replace("\r\n", "\n").split("\n");
            whiteList = new HashSet<>(Arrays.asList(whiteListArr));
        }

        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            String[] valArr = value.toString().split(" ");
            // Skip malformed lines (fewer than 2 fields) and names not on the
            // whitelist, rather than crashing the task with an
            // ArrayIndexOutOfBoundsException.
            if (valArr.length < 2 || !whiteList.contains(valArr[0])) {
                return;
            }
            try {
                val.set(Integer.parseInt(valArr[1]));
            } catch (NumberFormatException ignored) {
                // Amount field is not a valid integer — skip this record
                // instead of failing the whole job.
                return;
            }
            word.set(valArr[0]);
            context.write(word, val);
        }
    }

    /**
     * Reducer (also used as combiner): sums all reimbursement amounts
     * observed for one name and emits {@code (name, total)}.
     */
    public static class WhiteListReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        // Reused output writable to avoid one allocation per key.
        private final IntWritable total = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable amount : values) {
                sum += amount.get();
            }
            total.set(sum);
            context.write(key, total);
        }
    }

    /**
     * Job driver.
     *
     * <p>Expected arguments (after Hadoop generic options are stripped):
     * {@code <whitelist file> <input path> <output path>}.
     *
     * <p>The whitelist file is read from HDFS once and shipped to every
     * mapper through the job {@code Configuration}.
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        // GenericOptionsParser: 1) applies standard Hadoop options (-D, -files, ...)
        // to conf, 2) hands back the remaining application-specific arguments.
        GenericOptionsParser genericOptionsParser = new GenericOptionsParser(conf, args);
        String[] remainingArgs = genericOptionsParser.getRemainingArgs();
        // Validate argument count up front — otherwise the indexing below fails
        // with a confusing ArrayIndexOutOfBoundsException.
        if (remainingArgs.length < 3) {
            System.err.println("Usage: WhiteList <whitelist file> <input path> <output path>");
            System.exit(2);
        }
        // First argument: path to the whitelist file; its contents are passed
        // to the mappers via the Configuration (key "whiteList").
        String whiteListPath = remainingArgs[0];
        String whiteListStr = HdfsUtils.readHdfsFile(whiteListPath, "utf8");
        conf.set("whiteList", whiteListStr);

        Job job = Job.getInstance(conf, "kxr-whitelist-job021");
        job.setJarByClass(WhiteList.class);
        job.setMapperClass(WhiteListMapper.class);
        // Summation is associative and commutative, so the reducer can safely
        // double as a combiner to shrink shuffle traffic.
        job.setCombinerClass(WhiteListReducer.class);
        job.setReducerClass(WhiteListReducer.class);

        // Output key/value types (shared by map and reduce output here).
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Second argument: job input path; third: job output path.
        FileInputFormat.addInputPath(job, new Path(remainingArgs[1]));
        FileOutputFormat.setOutputPath(job, new Path(remainingArgs[2]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
