package com.shujia.mr.filter;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FilterScoreMapper extends Mapper<LongWritable, Text, Text, Text> {
    /*
        1. Read the output of the reduce-side join.
        2. Split each line into the student id and the detail columns.
        3. Emit only students whose total score is greater than 450.
     */

    /** Exclusive lower bound: a student is emitted only when score > this value. */
    private static final int SCORE_THRESHOLD = 450;

    // Hadoop idiom: reuse Writable instances across map() calls to avoid
    // per-record allocation; the framework copies the bytes on write().
    private final Text outKey = new Text();
    private final Text outValue = new Text();

    /**
     * Expected input line format: "id\t&lt;name,age,gender,class,score&gt;",
     * e.g. "1500100001\t施笑槐,22,女,文科六班,406".
     * Malformed records (missing tab separator, too few columns, or a
     * non-numeric score) are silently skipped instead of throwing an
     * unchecked exception that would fail the whole job.
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        String[] split = value.toString().split("\t");
        if (split.length < 2) {
            return; // no tab separator: not a join-output record
        }

        String[] columns = split[1].split(","); // name,age,gender,class,score
        if (columns.length < 5) {
            return; // score column missing
        }

        int score;
        try {
            score = Integer.parseInt(columns[4]);
        } catch (NumberFormatException ignored) {
            return; // score is not a valid integer; drop the record
        }

        if (score > SCORE_THRESHOLD) {
            outKey.set(split[0]);      // student id
            outValue.set(split[1]);    // full detail string
            context.write(outKey, outValue);
        }
    }
}
