package com.shujia.mr.stu;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;

public class Demo04MapJoin {
    // Mapper
    /**
     * Map-side join: loads the small table (id -> total score) from the
     * distributed cache into memory once per task, then enriches every
     * big-table record with the matching total score. No Reducer is needed.
     */
    public static class MapJoin extends Mapper<LongWritable, Text, Text, NullWritable> {
        // In-memory lookup built from the cached small table: id -> total score.
        // A plain HashMap suffices: each Mapper instance is single-threaded,
        // so the synchronized legacy Hashtable only added overhead.
        private Map<String, Integer> sumScoreMap;
        // Reusable output key — avoids allocating a new Text per input record.
        private final Text outKey = new Text();

        /**
         * Runs once when the map task starts. Reads the broadcast small-table
         * file through the FileSystem API and builds the id -> total-score map.
         *
         * @throws IOException if the cached file cannot be opened or read
         */
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            sumScoreMap = new HashMap<>();
            URI[] cacheFiles = context.getCacheFiles();
            // Guard against a missing/absent cache file instead of NPE-ing.
            if (cacheFiles == null || cacheFiles.length != 1) {
                return;
            }
            // Task-time configuration comes from the job context.
            Configuration conf = context.getConfiguration();
            FileSystem fs = FileSystem.get(conf);
            // try-with-resources closes the reader even on error (the original
            // leaked it); charset is pinned to UTF-8 rather than relying on the
            // platform default.
            try (BufferedReader br = new BufferedReader(
                    new InputStreamReader(fs.open(new Path(cacheFiles[0])), StandardCharsets.UTF_8))) {
                String line;
                while ((line = br.readLine()) != null) {
                    // Small-table lines are tab-separated: id \t totalScore
                    String[] splits = line.split("\t");
                    if (splits.length < 2) {
                        continue; // skip malformed lines rather than failing the task
                    }
                    sumScoreMap.put(splits[0], Integer.parseInt(splits[1]));
                }
            }
        }

        /**
         * Joins one big-table record (comma-separated, id in the first column)
         * against the in-memory map and emits "record|totalScore" as the key
         * with a null value. Ids absent from the small table get a total of 0.
         */
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String record = value.toString();
            String id = record.split(",")[0];
            Integer sumScore = sumScoreMap.getOrDefault(id, 0);
            outKey.set(record + "|" + sumScore);
            context.write(outKey, NullWritable.get());
        }
    }

    /**
     * Driver: configures and submits the map-join job.
     *
     * <p>Expected arguments: {@code <bigTableDir> <smallTableFile> <outputDir>}.
     * The big table is the regular job input; the small table is broadcast via
     * the distributed cache and loaded by the Mapper in {@code setup()}.
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException, URISyntaxException {
        // Validate arguments BEFORE creating any job/cluster state
        // (the original built the Job first and discarded it on bad args).
        if (args.length != 3) {
            System.out.println("请传入输入输出目录！");
            return;
        }
        String bigTable = args[0];
        String smallTable = args[1];
        String output = args[2];

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJobName("Demo04MapJoin");
        job.setJarByClass(Demo04MapJoin.class);

        // Map-only job: the Mapper's output is the final output.
        job.setMapperClass(MapJoin.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setNumReduceTasks(0);

        // Big table (student info) is the regular split input.
        FileInputFormat.addInputPath(job, new Path(bigTable));
        // Small table (student total scores) is broadcast to every map task;
        // the Mapper reads it back with FileSystem during setup().
        job.addCacheFile(new URI(smallTable));

        // FileOutputFormat fails if the output directory already exists, so
        // delete it up front to get overwrite-on-rerun semantics.
        Path outputPath = new Path(output);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }
        FileOutputFormat.setOutputPath(job, outputPath);

        // Submit and block until completion; propagate job success/failure as
        // the process exit code so callers/schedulers can detect failures
        // (the original always exited 0, even when the job failed).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
