package com.v5.mr.combiner.wc;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

//                                           k1            v1     k2        v2
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

	// Reuse writable objects across map() calls to avoid per-record allocations
	private static final LongWritable ONE = new LongWritable(1);
	private final Text word = new Text();

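	/**
	 * Called once per input line. With the default TextInputFormat, k1 is the
	 * byte offset of the line within the file and v1 is the line's text; each
	 * word in the line is emitted as a (Text, LongWritable) pair of (word, 1).
	 */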
	@Override
	protected void map(LongWritable k1, Text v1, Context context)
			throws IOException, InterruptedException {
		/*
		 * context represents the Map context:
		 * upstream  : HDFS (the input split being read)
		 * downstream: the Reducer
		 */
		// Sample input line: I love Beijing
		String data = v1.toString();
		
		// Tokenize the line on single spaces
		String[] words = data.split(" ");
		
		// Emit (k2, v2) = (word, 1) for each token
		for (String w : words) {
			word.set(w);
			context.write(word, ONE);
		}
	}

}
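
/*
 * Hedged sketch, not part of the original file: since this mapper sits in the
 * "combiner" example package and its output types (Text, LongWritable) match a
 * word-count reducer's input types, that reducer can typically double as the
 * combiner. A minimal driver wiring might look like the lines below, where
 * WordCountMain and WordCountReducer are assumed companion classes, not
 * confirmed by this file:
 *
 *   Job job = Job.getInstance(new Configuration());
 *   job.setJarByClass(WordCountMain.class);         // assumed driver class
 *   job.setMapperClass(WordCountMapper.class);
 *   job.setCombinerClass(WordCountReducer.class);   // combiner reuses the reducer
 *   job.setReducerClass(WordCountReducer.class);    // assumed companion class
 *   job.setOutputKeyClass(Text.class);
 *   job.setOutputValueClass(LongWritable.class);
 */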
