package com.example.hadoop.mapreduce.orderjoin;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Created with IntelliJ IDEA.
 *
 * @Author: Brian
 * @Date: 2020/04/28/8:21
 * @Description:
 * 数据倾斜： 当有多个Reducer的情况下，不同的reducer拿到的订单信息数据可能不一样
 * 这就导致了有些reducer占用的内存多，有些reducer占用内存少，这就是数据倾斜
 *
 * 解决方案：
 * 在Map端join产品表，在执行mapper之前把产品表加载到内存中，mapper()方法就直接获取对应的产品信息
 * 这样可以避免数据倾斜
 */
public class OrderMapSideJoin {
    static class OrderMapSideJoinMapper extends Mapper<LongWritable, Text, OrderInfoBean, NullWritable> {

        // Product id -> product info, loaded once per map task in setup().
        Map<String, OrderInfoBean> productBeanMap = new HashMap<>();

        // Output key reused across map() calls to avoid per-record allocation
        // (standard Hadoop pattern; safe because the framework serializes the
        // key before the next call).
        OrderInfoBean k = new OrderInfoBean();

        /**
         * Called once per map task before any {@link #map} call. Loads the
         * (small) product table into {@link #productBeanMap} so the join can
         * be done entirely on the map side, avoiding reduce-side data skew.
         *
         * <p>The relative path {@code "product.txt"} resolves against the task
         * working directory, where the distributed cache (see
         * {@code job.addCacheFile} in {@link #main}) makes the file available.
         *
         * @param context task context (unused here)
         * @throws IOException if the cache file cannot be read
         * @throws InterruptedException never thrown here; declared by the superclass
         */
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // try-with-resources: the reader is closed even if a line fails to
            // parse. Explicit UTF-8 avoids platform-default-charset surprises.
            try (BufferedReader br = new BufferedReader(new InputStreamReader(
                    new FileInputStream("product.txt"), StandardCharsets.UTF_8))) {
                String line;
                while ((line = br.readLine()) != null) {
                    // Expected columns: productId, productName, categoryId, price.
                    String[] productInfos = line.split("\t");
                    if (productInfos.length < 4) {
                        continue; // skip blank/malformed lines (e.g. trailing newline)
                    }
                    OrderInfoBean product = new OrderInfoBean();
                    product.setProductName(productInfos[1]);
                    product.setCategoryId(productInfos[2]);
                    product.setPrice(Double.parseDouble(productInfos[3]));
                    productBeanMap.put(productInfos[0], product);
                }
            }
        }

        /**
         * Joins one order record against the in-memory product table and emits
         * the enriched bean. Orders whose product id has no match are emitted
         * with only the order fields populated.
         *
         * <p>Expected order columns (tab-separated): orderId, date, productId,
         * amount — TODO confirm column semantics against the input data.
         *
         * @param key     byte offset of the line in the input split (unused)
         * @param value   one tab-separated order line
         * @param context used to emit the joined record
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            String[] splits = line.split("\t");
            // splits[2] is the product id — the join key into the cached table.
            OrderInfoBean productInfo = productBeanMap.get(splits[2]);
            k.setOrderInfo(splits[0], splits[1], splits[2], Double.parseDouble(splits[3]));
            if (productInfo != null) {
                // Enrich the order with the matched product's fields.
                k.setProductName(productInfo.getProductName());
                k.setCategoryId(productInfo.getCategoryId());
                k.setPrice(productInfo.getPrice());
            }
            context.write(k, NullWritable.get());
        }

        /**
         * Job driver.
         *
         * <p>Usage: {@code OrderMapSideJoinMapper <input> <output> [productCacheUri]}.
         * The optional third argument overrides the default (Windows-specific)
         * product-table cache URI, keeping the original behavior when omitted.
         *
         * @param args input path, output path, optional product cache URI
         * @throws Exception on job setup or execution failure
         */
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // BUG FIX: the original called Job.getInstance() without conf, so
            // the Configuration object was created but never used by the job.
            Job job = Job.getInstance(conf);
            job.setJarByClass(OrderMapSideJoin.class);
            job.setMapperClass(OrderMapSideJoinMapper.class);
            job.setOutputKeyClass(OrderInfoBean.class);
            job.setOutputValueClass(NullWritable.class);

            // Input/output paths; delete a pre-existing output dir so reruns
            // don't fail with FileAlreadyExistsException.
            FileInputFormat.setInputPaths(job, new Path(args[0]));
            Path outputPath = new Path(args[1]);
            FileSystem fileSystem = FileSystem.get(conf);
            if (fileSystem.exists(outputPath)) {
                fileSystem.delete(outputPath, true);
            }
            FileOutputFormat.setOutputPath(job, outputPath);

            // Ship the product table to every map task's working directory via
            // the distributed cache; setup() reads it as "product.txt".
            String cacheUri = args.length > 2 ? args[2] : "file:/F:/hadoop/test/mapjoin/product.txt";
            job.addCacheFile(new URI(cacheUri));

            // The join happens entirely on the map side; no reduce phase needed.
            job.setNumReduceTasks(0);
            boolean res = job.waitForCompletion(true);
            System.out.println("Successfully? --> " + res);
            System.exit(res ? 0 : 1);
        }
    }
}
