package com.shujia.mr.scoreCount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class ScoreCountMapper extends Mapper<LongWritable, Text, LongWritable, IntWritable> {

    // Reusable output Writables: map() is called once per input record, so
    // reusing these instances avoids allocating two objects per line — the
    // standard Hadoop Mapper optimization.
    private final LongWritable outKey = new LongWritable();
    private final IntWritable outValue = new IntWritable();

    /**
     * Mapper-side logic:
     *   1. Read one line of input.
     *   2. Split the line into comma-separated columns.
     *   3. Emit (student ID, score) for the Reducer to aggregate.
     *
     * Input:
     *   key:   byte offset of the line within the input split
     *   value: one line of text, e.g. "1500100001,1000001,98"
     *          (columns: student ID, subject ID, score)
     *
     * Output:
     *   key:   student ID
     *   value: score
     *
     * Blank, short, or non-numeric records are skipped (and counted via the
     * "ScoreCount:MALFORMED_RECORDS" counter) rather than failing the task.
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, LongWritable, IntWritable>.Context context) throws IOException, InterruptedException {
        // value: 1500100001,1000001,98
        String line = value.toString().trim();
        if (line.isEmpty()) {
            return; // skip blank lines
        }

        String[] columns = line.split(",");
        if (columns.length < 3) {
            // Malformed record: previously this threw ArrayIndexOutOfBoundsException
            // and killed the whole map task.
            context.getCounter("ScoreCount", "MALFORMED_RECORDS").increment(1L);
            return;
        }

        try {
            outKey.set(Long.parseLong(columns[0].trim()));
            outValue.set(Integer.parseInt(columns[2].trim()));
        } catch (NumberFormatException ignored) {
            // Non-numeric ID/score (e.g. a header row) — count and skip.
            context.getCounter("ScoreCount", "MALFORMED_RECORDS").increment(1L);
            return;
        }

        context.write(outKey, outValue);
    }
}
