package com.shujia.mr.filter;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Reads the output of the count job (one line per student) and keeps only
 * the students whose total score passes the threshold.
 *
 * <p>Expected input value format (tab-separated): {@code studentID \t totalScore}.
 *
 * <p>Mapper logic:
 * <ol>
 *   <li>Split each input line on the tab delimiter.</li>
 *   <li>If the score passes the threshold, emit {@code (studentID, score)} directly
 *       (no Reducer aggregation is required for a pure filter).</li>
 * </ol>
 *
 * <p>NOTE(review): the original requirement text says "greater than 400" but the
 * code checks {@code score >= 400}; the inclusive comparison is preserved here —
 * confirm which boundary is intended.
 */
public class MyFilterMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    /** Score threshold; records at or above this value are emitted. */
    private static final int SCORE_THRESHOLD = 400;

    // Reusable output Writables: avoids allocating two fresh objects for every
    // input record, the standard Hadoop mapper optimization.
    private final Text outKey = new Text();
    private final IntWritable outValue = new IntWritable();

    @Override
    protected void map(LongWritable key, Text value,
                       Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        String[] columns = value.toString().split("\t");

        // Guard against blank or malformed lines (e.g. a missing tab column)
        // instead of failing the whole task with ArrayIndexOutOfBoundsException;
        // surface them through a counter so data issues remain visible.
        if (columns.length < 2) {
            context.getCounter("MyFilter", "MALFORMED_LINES").increment(1L);
            return;
        }

        final int score;
        try {
            // trim() tolerates a trailing '\r' from Windows-encoded input files.
            score = Integer.parseInt(columns[1].trim());
        } catch (NumberFormatException ignored) {
            // Non-numeric score (e.g. a header line) — count it and skip the record.
            context.getCounter("MyFilter", "MALFORMED_LINES").increment(1L);
            return;
        }

        if (score >= SCORE_THRESHOLD) {
            String studentID = columns[0];
            outKey.set(studentID);
            outValue.set(score);
            context.write(outKey, outValue);
        }
    }
}
