package com.hadoop.mr.mapJoin;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

/**
 * @program: hadoop
 * @description: map-side join
 * @author: Qiang.Ye
 * @date: 2019-06-09 08:45
 *
 * Learning points:
 * 1. Joining two tables
 * 2. Caching the small table in memory
 * 3. Using setup() for one-time initialization
 **/
public class DistributedCacheMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    // Product lookup table (pid -> pname), loaded once per task in setup().
    Map<String, String> pdMap = new HashMap<>();

    // Reusable output key (Hadoop idiom: avoid allocating a Text per record).
    Text k = new Text();

    /**
     * Loads the small "product" table into {@link #pdMap} before any map() call.
     * Each line of the file is expected to be: pid \t pname.
     *
     * NOTE(review): the path is hard-coded to a local Windows file; in a real
     * cluster job this file should be shipped via the distributed cache
     * (job.addCacheFile) and read by its symlink name — confirm against the driver.
     *
     * @param context task context (unused here)
     * @throws IOException if the lookup file cannot be read
     */
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // try-with-resources: the reader is closed even if parsing throws.
        try (BufferedReader bfr = new BufferedReader(new InputStreamReader(
                new FileInputStream(new File("D:\\data\\Date\\A\\pd.txt")),
                StandardCharsets.UTF_8))) {
            String line;
            // Read to end-of-file. The original looped on
            // StringUtils.isNotEmpty(readLine()), which stops at the first
            // blank line and silently drops every record after it.
            while ((line = bfr.readLine()) != null) {
                if (line.isEmpty()) {
                    continue; // tolerate blank lines instead of truncating the table
                }
                String[] fields = line.split("\t");
                if (fields.length < 2) {
                    continue; // skip malformed lines instead of crashing the task
                }
                // fields[0] = product id, fields[1] = product name
                pdMap.put(fields[0], fields[1]);
            }
        }
    }

    /**
     * Map-side join: replaces the product id in each order record with the
     * product name from the cached table and emits the joined line as the key.
     *
     * Input line format: orderId \t pid \t amount (e.g. "1001\t01\t1").
     * Output: "orderId \t pname \t amount" with a NullWritable value.
     *
     * @param key     byte offset of the line (unused)
     * @param value   one order record
     * @param context used to emit the joined record
     */
    @Override
    protected void map(LongWritable key,
                       Text value,
                       Context context) throws IOException, InterruptedException {
        String[] fields = value.toString().split("\t");
        if (fields.length < 3) {
            return; // ignore malformed input lines rather than failing the task
        }
        String orderId = fields[0];
        String pid = fields[1];
        String amount = fields[2];

        // getOrDefault avoids concatenating the literal "null" into the output
        // when the pid has no match in the product table.
        String pname = pdMap.getOrDefault(pid, "");
        k.set(orderId + "\t" + pname + "\t" + amount);
        context.write(k, NullWritable.get());
    }
}
