package com.shengzai.mapreduce.sort;


import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * When Student is emitted as the output key, the MR framework sorts records by
 * key during the shuffle. The framework does not know how to order Student
 * objects on its own, so Student must define a custom ordering
 * (i.e. implement WritableComparable).
 *
 * Mapper side:
 *     Reads the output of the ReduceJoin job, wraps each student's basic
 *     information into a Student object, and writes the Student as the key to
 *     the Reduce side, so that records are sorted by the student's score.
 */
public class SortMapper extends Mapper<LongWritable, Text, Student, NullWritable> {

    /**
     * Parses one line of the ReduceJoin output into a {@link Student} and emits
     * it as the map output key (with a NullWritable value) so the shuffle phase
     * sorts records by Student's custom ordering (by score).
     *
     * Expected line format: {@code studentID \t name,age,<unused>,clazz,score}
     * (columns[2] is never read here — presumably gender; confirm against the
     * upstream ReduceJoin output.)
     *
     * Malformed records (missing tab, too few columns, non-numeric age/score)
     * are skipped instead of failing the whole map task.
     *
     * @param key     byte offset of the line in the input split (unused)
     * @param value   one line of ReduceJoin output
     * @param context used to emit the (Student, NullWritable) pair
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] splits = line.split("\t");
        // Guard: a line without the ID/detail separator cannot be parsed.
        if (splits.length < 2) {
            return;
        }
        String studentID = splits[0];
        String[] columns = splits[1].split(",");
        // Guard: need at least name, age, _, clazz, score.
        if (columns.length < 5) {
            return;
        }
        try {
            String studentName = columns[0];
            int age = Integer.parseInt(columns[1].trim());
            String clazz = columns[3];
            int score = Integer.parseInt(columns[4].trim());
            context.write(new Student(studentID, studentName, score, clazz, age),
                    NullWritable.get());
        } catch (NumberFormatException ignored) {
            // Non-numeric age/score: drop this record rather than crash the task.
        }
    }
}
