package com.v.reduceJoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class TableMapper extends Mapper<LongWritable, Text, Text, TableBean> {

    // Output key/value objects reused across map() calls to avoid
    // per-record allocation (standard Hadoop mapper pattern).
    private final Text outK = new Text();
    private final TableBean outV = new TableBean();

    // Name of the input file this map task reads; cached once in setup()
    // and used in map() to tell "order" records from "pd" (product) records.
    private String filename;

    /**
     * Caches the current split's file name so {@link #map} can tag each
     * record with the table it came from.
     */
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        filename = inputSplit.getPath().getName();
    }

    /**
     * Emits {@code (pid, TableBean)} pairs, tagging each bean with its source
     * table ("order" or "pd") so the reducer can join orders with product
     * names on pid.
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   one tab-separated input line
     * @param context Hadoop context used to emit the (pid, bean) pair
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

        // Both inputs are tab-separated; split once, shared by either branch.
        String[] fields = value.toString().split("\t");

        // Decide which table this line belongs to by the source file name.
        if (filename.contains("order")) {
            // order file layout: id \t pid \t amount
            outK.set(fields[1]);
            outV.setId(fields[0]);
            outV.setPid(fields[1]);
            outV.setAmout(Integer.parseInt(fields[2])); // "Amout" [sic] is TableBean's API
            outV.setPname("");
            outV.setFlag("order");
        } else {
            // pd (product) file layout: pid \t pname
            outK.set(fields[0]);
            outV.setId("");
            outV.setPid(fields[0]);
            outV.setAmout(0);
            outV.setPname(fields[1]);
            outV.setFlag("pd");
        }

        context.write(outK, outV);
    }
}
