package com.shujia.mr.reduceJoin;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class StudentMapper extends Mapper<LongWritable, Text, Text, Text> {
    /*
        Processing logic:
            1. Read records from the two joined input files (student info and scores).
            2. Detect which file a record came from by its field delimiter.
            3. Emit the student ID as the output key.
            4. Emit the remaining fields as the output value.
     */

    // Reusable Writable instances — Hadoop best practice is to reuse output
    // objects instead of allocating two new Text objects per input record.
    private final Text outKey = new Text();
    private final Text outValue = new Text();

    /**
     * Maps one input line to a (studentId, payload) pair.
     * Student records are comma-separated (e.g. 1500100001,施笑槐,22,女,文科六班);
     * score records are tab-separated (e.g. 1500100001\t406). The reducer can
     * tell them apart because the student payload still contains commas.
     * Malformed lines with too few fields are skipped silently.
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context)
            throws IOException, InterruptedException {
        String oneLine = value.toString();

        if (oneLine.contains(",")) {
            // Student file: ID, name, age, gender, class.
            String[] columns = oneLine.split(",");
            if (columns.length < 5) {
                // Guard against ArrayIndexOutOfBoundsException on malformed input.
                return;
            }
            outKey.set(columns[0]);
            // Re-join everything after the ID as the value payload.
            outValue.set(String.join(",", columns[1], columns[2], columns[3], columns[4]));
            context.write(outKey, outValue);
        } else {
            // Score file: ID <TAB> total score.
            String[] columns = oneLine.split("\t");
            if (columns.length < 2) {
                return; // malformed record — skip
            }
            outKey.set(columns[0]);
            outValue.set(columns[1]);
            context.write(outKey, outValue);
        }
    }
}
