package com.nt.mapreduce020201_hello.yarn.wordCount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text,
        IntWritable> {
    // Reusable count value: every word occurrence is emitted with count 1.
    private final IntWritable one = new IntWritable(1);
    // Reusable output key; avoids allocating a new Text object per token,
    // the standard Hadoop mapper optimization.
    private final Text word = new Text();

    /**
     * Tokenizes one line of input and emits a (word, 1) pair for each token.
     *
     * @param key     byte offset of the current line within the input split
     * @param value   the line of text produced by the record reader
     * @param context sink for the (word, 1) pairs written to the map-side
     *                shuffle buffer
     * @throws IOException          if writing to the context fails
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Strip everything except letters, digits, apostrophes and whitespace.
        String line = value.toString().replaceAll("[^a-zA-Z0-9'\\s]", "");
        // Split on runs of whitespace: the original split(" ") produced empty
        // tokens for consecutive spaces, which were counted as "" words.
        for (String token : line.split("\\s+")) {
            if (!token.isEmpty()) {
                word.set(token);
                context.write(word, one);
            }
        }
    }
}