package cn.lianxi.mrv2._04_;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Input data (one student id and one score per line, tab-separated):
 * 
 * S1  80
 * S1  70
 * S2  90
 * S3  80
 * S3  70
 * S3  60
 * S4  100
 * 
 * @author yingfing
 * @date November 24, 2020 @time 10:27:58 PM
 */
public class TokenizerMapper extends
		Mapper<LongWritable, Text, MyWritable, IntWritable> {
	// Reused across map() calls to avoid allocating new objects per record.
	MyWritable k = new MyWritable();
	IntWritable v = new IntWritable();

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// Each input line is "id<TAB>score", e.g. "S1\t80".
		String[] fields = value.toString().split("\t");
		int score = Integer.parseInt(fields[1]);
		// The composite key carries both the id and the score so that the
		// shuffle phase can sort scores within each student id (secondary sort).
		k.set(fields[0], score);
		v.set(score);
		// Emits pairs such as <(S1,80), 80> and <(S1,70), 70>.
		context.write(k, v);
	}
}
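
/*
 * MyWritable is referenced above but not defined in this file; the real class
 * presumably lives elsewhere in the cn.lianxi.mrv2._04_ package. The sketch
 * below is a minimal, assumed implementation of the usual secondary-sort
 * composite key (field names, sort order, and the descending score ordering
 * are assumptions, not the original code). If MyWritable already exists in
 * this package, this duplicate is for illustration only.
 */
class MyWritable implements org.apache.hadoop.io.WritableComparable<MyWritable> {
	private String id; // student id, e.g. "S1"
	private int score; // score, e.g. 80

	public void set(String id, int score) {
		this.id = id;
		this.score = score;
	}

	public String getId() {
		return id;
	}

	@Override
	public void write(java.io.DataOutput out) throws IOException {
		out.writeUTF(id);
		out.writeInt(score);
	}

	@Override
	public void readFields(java.io.DataInput in) throws IOException {
		id = in.readUTF();
		score = in.readInt();
	}

	@Override
	public int compareTo(MyWritable o) {
		// Order by id first, then by score descending within the same id,
		// so each student's scores arrive at the reducer highest-first.
		int cmp = id.compareTo(o.id);
		return cmp != 0 ? cmp : Integer.compare(o.score, score);
	}

	@Override
	public int hashCode() {
		// Hash on id only so the default HashPartitioner sends all records
		// for one student to the same reduce partition.
		return id.hashCode();
	}

	@Override
	public boolean equals(Object obj) {
		return obj instanceof MyWritable && compareTo((MyWritable) obj) == 0;
	}

	@Override
	public String toString() {
		return id + "," + score;
	}
}
// Design note: for the records of one student to land in a single reduce()
// call, the driver would typically also register a grouping comparator that
// compares ids only; with the default grouping (full compareTo), each
// (id, score) pair forms its own group.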
