package cn.hesion.mrmapjoin;
/**
 * ClassName: MapJoinMapper <br/>
 * Description: <br/>
 * date: 2021/2/3 14:07<br/>
 *
 * @author Hesion<br />
 * @version
 * @since JDK 1.8
 */

import cn.hesion.mrreducejoin.DeliverBean;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

/**
 * @program: ClientDemo
 * @description:
 * @author: hesion
 * @create: 2021-02-03 14:07
 **/
public class MapJoinMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    /**
     * Path of the small "position" side table (lines of {@code positionId<TAB>positionName}).
     * NOTE(review): hard-coded absolute local path — consider distributing the file via
     * {@code Job#addCacheFile} and reading it from {@code context.getCacheFiles()} so the
     * job runs on any node; kept as-is here to preserve behavior.
     */
    private static final String POSITION_FILE =
            "D:/大数据学习/大数据正式班第一阶段模块一/模块一/资料/MRjoin/map_join/cache/position.txt";

    /** Reused output key — one Text instance per mapper, per Hadoop convention. */
    private final Text k = new Text();

    /** In-memory lookup table for the map-side join: position id -> position name. */
    private final Map<String, String> pMap = new HashMap<>();

    /**
     * Loads the small side table into {@link #pMap} once per mapper, so {@link #map}
     * can perform the join without a reduce phase.
     *
     * @throws IOException if the cache file cannot be read
     */
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // try-with-resources guarantees the reader is closed even if readLine() throws
        // (the original closed it only on the success path — a leak on error).
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(POSITION_FILE), StandardCharsets.UTF_8))) {
            String line;
            // Stops at EOF (readLine() returns null) or at the first empty line,
            // matching the original StringUtils.isNotEmpty loop condition.
            while (StringUtils.isNotEmpty(line = reader.readLine())) {
                String[] fields = line.split("\t");
                // Skip malformed rows instead of throwing ArrayIndexOutOfBoundsException.
                if (fields.length >= 2) {
                    pMap.put(fields[0], fields[1]);
                }
            }
        }
    }

    /**
     * Joins each input record with its position name from the cached table and emits
     * {@code originalLine<TAB>positionName} as the key (value is {@link NullWritable}).
     *
     * <p>Assumes the input line is tab-separated with the position id in column 2 —
     * TODO(review): confirm against the big-table format; a line without a tab would throw.
     * If the id is absent from the table the literal string "null" is appended,
     * preserving the original concatenation behavior.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // One input line of the big table.
        String line = value.toString();
        String[] fields = line.split("\t");

        // Position id is the second column.
        String pId = fields[1];

        // Map-side join: look up the position name and append it to the line.
        String pName = pMap.get(pId);
        k.set(line + "\t" + pName);
        context.write(k, NullWritable.get());
    }
}
