package cuilitang.hadoop.mapreduce;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Mapper for a word-count job: reads each input line, splits it on whitespace,
 * strips common punctuation from every token, and emits a (word, 1) pair for
 * each non-empty token. The reducer sums the 1s per word.
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reused across map() calls to avoid per-record allocation (standard Hadoop idiom).
    private final Text keyOut = new Text();                   // the cleaned word
    private final IntWritable valueOut = new IntWritable(1);  // constant count of 1

    /**
     * Processes one input record (a single line of text).
     *
     * @param keyIn   byte offset of the line within the input split (unused)
     * @param valueIn the line of text to tokenize
     * @param context sink for the emitted (word, 1) pairs
     * @throws IOException          if the underlying record writer fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(LongWritable keyIn,
                       Text valueIn,
                       Mapper<LongWritable, Text, Text, IntWritable>.Context context
    ) throws IOException, InterruptedException {
        // Split on runs of whitespace so tabs and repeated/leading spaces
        // don't yield empty tokens (split(" ") would).
        String[] wordList = valueIn.toString().split("\\s+");

        for (String word : wordList) {
            // Strip the punctuation characters this job normalizes away:
            // comma, period, colon, curly quotes, and the straight double quote.
            String cleaned = word.replace(",", "").replace(".", "")
                    .replace(":", "").replace("“", "").replace("”", "")
                    .replace("\"", "");
            // Skip tokens that were empty or punctuation-only so we never
            // emit an empty-string key into the shuffle.
            if (!cleaned.isEmpty()) {
                keyOut.set(cleaned);
                context.write(keyOut, valueOut);
            }
        }
    }
}
