package job05;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;

import job01.TProperties;

public class CountTopnMapper extends Mapper<LongWritable, Text, Text, Text> {
	/** Index (within the split record) of the grouping field; parsed once in {@link #setup}. */
	private int topkeyIndex;
	/** Field separator regex for each input record; cached to avoid a property lookup per record. */
	private String separator;
	/** Reusable output key (Hadoop idiom: avoid allocating a new Writable per record). */
	private final Text okey = new Text();

	/**
	 * Reads the position of the grouping field from the job configuration and caches
	 * the record separator. Fails fast if the required "topkey" property is missing,
	 * rather than throwing an opaque NPE on the first record.
	 *
	 * @throws IllegalStateException if the "topkey" configuration property is not set
	 * @throws NumberFormatException if "topkey" is not a valid integer
	 */
	@Override
	protected void setup(Context context) throws IOException, InterruptedException {
		String topkey = context.getConfiguration().get("topkey");
		if (topkey == null) {
			throw new IllegalStateException("Required configuration property \"topkey\" is not set");
		}
		topkeyIndex = Integer.parseInt(topkey);
		separator = TProperties.getValue("fileoutsplit");
	}

	/**
	 * Emits (grouping-field value, whole record) so the shuffle groups records by that field.
	 *
	 * @param key   byte offset of the line in the input split (unused)
	 * @param value one input record, fields separated by the configured separator
	 */
	@Override
	public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
		String[] fields = value.toString().split(separator);
		// Reuse the Text instance instead of allocating one per record.
		okey.set(fields[topkeyIndex]);
		context.write(okey, value);
	}
}
