package com.csthink.mr.join.mapjoin;

import com.csthink.mr.join.reducejoin.EmpInfo;
import com.csthink.utils.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;

/**
 * 需求: 读取 data/join 目录下的两个文件（dept.txt 部门表数据，emp.txt 员工表数据）
 * 实现 select e.emp_id, e.emp_name, e.dept_id, d.dept_name from emp e join dept d on e.dept_id = d.dept_id;
 *
 * @author <a href="mailto:csthink@icloud.com">Mars</a>
 * @since 2024-04-16 15:08
 */
public class MapJoinApp {

    public static void main(String[] args) throws Exception {
        // Big-table side: every emp.txt record streams through the mapper.
        String input = "data/join/emp.txt";
        String output = "out";

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // Fixed: was copy-pasted as "ReduceJoinApp", inconsistent with this class.
        job.setJobName("MapJoinApp");

        // Remove a stale output directory so the job does not fail on restart.
        FileUtils.deleteIfExists(conf, output);

        job.setJarByClass(MapJoinApp.class);
        job.setMapperClass(MyMapper.class);

        // Map-side join: the join happens entirely in the mapper, so no
        // shuffle/reduce phase is needed.
        job.setNumReduceTasks(0);
        job.setMapOutputKeyClass(EmpInfo.class);
        job.setMapOutputValueClass(NullWritable.class);

        // Ship the small table (dept.txt) to every mapper via the distributed cache.
        job.addCacheFile(new URI("data/join/dept.txt"));

        FileInputFormat.setInputPaths(job, new Path(input));
        FileOutputFormat.setOutputPath(job, new Path(output));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Joins each employee record against the in-memory department lookup
     * built once per task in {@link #setup}.
     */
    public static class MyMapper extends Mapper<LongWritable, Text, EmpInfo, NullWritable> {

        // dept_id -> dept_name, loaded from the cached dept.txt in setup().
        private final Map<String, String> cache = new HashMap<>();

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            URI[] cacheFiles = context.getCacheFiles();
            String path = cacheFiles[0].getPath();

            // try-with-resources: the original leaked the reader whenever an
            // exception was thrown before IOUtils.closeStream() was reached.
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(Files.newInputStream(Paths.get(path)), StandardCharsets.UTF_8))) {
                // Read dept.txt until EOF (readLine() == null), storing columns
                // 0 and 1 in the cache map. The original loop condition used
                // StringUtils.isNotEmpty(...) and therefore stopped at the FIRST
                // blank line, silently dropping every department listed after it.
                String line;
                while ((line = reader.readLine()) != null) {
                    if (line.trim().isEmpty()) {
                        continue; // tolerate blank lines instead of truncating the table
                    }
                    String[] fields = line.split("\t");
                    if (fields.length < 2) {
                        continue; // malformed row: need both dept_id and dept_name
                    }
                    cache.put(fields[0].trim(), fields[1].trim());
                }
            }
        }

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Big-table side: one tab-separated emp.txt record per call.
            String[] splits = value.toString().split("\t");
            // Guard against short/blank rows: the original indexed splits[7]
            // unconditionally, so one bad record killed the whole task with
            // an ArrayIndexOutOfBoundsException.
            if (splits.length < 8) {
                return;
            }
            String empId = splits[0].trim();
            String empName = splits[1].trim();
            int deptNo = Integer.parseInt(splits[7].trim());

            EmpInfo empInfo = new EmpInfo();
            empInfo.setEmpId(empId);
            empInfo.setEmpName(empName);
            empInfo.setDeptNo(deptNo);

            // Look up the department name from the cached small table; emit an
            // empty string when the dept id has no match (left-join semantics).
            empInfo.setDeptName(cache.getOrDefault(String.valueOf(deptNo), StringUtils.EMPTY));
            context.write(empInfo, NullWritable.get());
        }
    }


}
