package ReduceJoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class RJMapper extends Mapper<LongWritable, Text, Text, TableBean> {
    // Reused across map() calls: Hadoop serializes the objects inside
    // context.write(), so mutating one instance per record avoids
    // allocating a new bean/key for every input line.
    private final TableBean bean = new TableBean();
    // Output key: the product id (p_id) — the join key for the reduce side.
    private final Text k = new Text();

    /**
     * Tags each input record with its source table so the reducer can join
     * order records with product records on {@code p_id}.
     *
     * <p>The source table is detected from the input file name: files whose
     * name starts with "order" carry {@code order_id\tp_id\tamount} and are
     * flagged "0"; all other files (pd.txt) carry {@code p_id\tname} and are
     * flagged "1". Fields belonging to the other table are filled with
     * neutral defaults ("" / 0) so the bean is always fully populated.
     *
     * @param key     byte offset of the line in the input file (unused)
     * @param value   one tab-separated input line
     * @param context Hadoop context used to emit (p_id, TableBean) pairs
     * @throws IOException          on write failure
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // The file split tells us which table this record came from.
        FileSplit split = (FileSplit) context.getInputSplit();
        String filename = split.getPath().getName();

        String line = value.toString();
        // Guard: a blank line would make fields[1] below throw
        // ArrayIndexOutOfBoundsException, failing the whole task.
        if (line.isEmpty()) {
            return;
        }
        // Both tables are tab-separated; split once for either branch.
        String[] fields = line.split("\t");

        if (filename.startsWith("order")) {
            // order table layout: order_id \t p_id \t amount
            bean.setOrder_id(fields[0]);
            bean.setP_id(fields[1]);
            bean.setAmount(Integer.parseInt(fields[2]));
            // Product-side field gets a neutral default.
            bean.setName("");
            // flag "0" marks an order-table record.
            bean.setFlag("0");
            // Emit keyed by p_id so matching product rows meet in one reduce call.
            k.set(fields[1]);
            // e.g. {01, TableBean{order_id:201801, p_id:01, amount:1, name:"", flag:"0"}}
        } else {
            // product table (pd.txt) layout: p_id \t name
            bean.setOrder_id("");
            // Order-side fields get neutral defaults.
            bean.setP_id(fields[0]);
            bean.setAmount(0);
            bean.setName(fields[1]);
            // flag "1" marks a product-table record.
            bean.setFlag("1");
            k.set(fields[0]);
            // e.g. {01, TableBean{order_id:"", p_id:01, amount:0, name:apple, flag:"1"}}
        }
        context.write(k, bean);
    }
}
