package hadoopDemo;

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>:
//   input key   = LongWritable (byte offset of the line within the file)
//   input value = Text         (the line's content)
//   output key  = Text         (a single word)
//   output value = IntWritable (the count 1 for each occurrence)
public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

	// Reused output writables. The original allocated a fresh Text and
	// IntWritable for every word of every record; Hadoop mappers run once
	// per input record, so reusing a single instance avoids needless
	// garbage in this hot path. context.write serializes the values, so
	// reuse is safe here.
	private final Text word = new Text();
	private static final IntWritable ONE = new IntWritable(1);

	/**
	 * Splits each input line into whitespace-separated words and emits a
	 * {@code (word, 1)} pair for every word found.
	 *
	 * <p>The default TextInputFormat supplies the line's byte offset as the
	 * key and the line content as the value.
	 *
	 * @param lineNum    byte offset of this line within the input split
	 * @param value_line the text of one input line
	 * @param context    used to emit (word, 1) pairs to the shuffle phase
	 * @throws IOException          if writing to the context fails
	 * @throws InterruptedException if the task is interrupted
	 */
	@Override
	protected void map(LongWritable lineNum, Text value_line, Context context) throws IOException, InterruptedException {
		String line = value_line.toString();
		// Split on runs of whitespace. The original split(" ") produced
		// empty tokens for consecutive spaces, which were then counted as
		// occurrences of the empty word "".
		for (String token : line.split("\\s+")) {
			if (token.isEmpty()) {
				// split("\\s+") still yields one leading empty token when
				// the line starts with whitespace — skip it.
				continue;
			}
			word.set(token);
			context.write(word, ONE); // emit (word, 1)
		}
	}
}
