package com.shujia.mr.wc2;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.Locale;
import java.util.regex.Pattern;

/**
 * 自己编写的Map类要继承hadoop包中的Mapper类，才算是MR中Map任务
 * <p>
 * MR底层Map阶段中读取文件数据，默认是用一个叫做LineRecoder（行记录读取器）按照行进行读取的
 * 并且读取到Map阶段开始之前，数据是以键值对的方式进入Map阶段的
 * <p>
 * 原始一行数据：
 * hello world java
 * 经过底层读取到一行封装之后的数据：
 * <0L, "hello world java">
 * 按照单词统计的逻辑。出来的数据：
 * <"hello",1L>
 * <"world",1L>
 * <"java",1L>
 * <p>
 * hadoop中的数据类型对应到java中的数据类型
 * ByteWritable - byte
 * ShortWritable - short
 * IntWritable - int
 * LongWritable - long
 * FloatWritable - float
 * DoubleWritable - double
 * BooleanWritable - boolean
 * Text - String
 */
public class MyMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
    //重写map方法，让map任务执行我们自己定义的逻辑
    //读取到每一行数据都会执行这里的map方法逻辑
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, LongWritable, Text>.Context context) throws IOException, InterruptedException {
        //<0L, "hello world java">
        //将hadoop中Text类型转成java中String类型
        String line = value.toString();

        line = line.replaceAll(", ", " ")
                .replaceAll(" - ", " ")
                .replaceAll("'", " ")
                .replaceAll("\\. ", " ")
                .replaceAll("\\.", " ")
                .replaceAll(","," ")
                .toLowerCase();

        context.write(key,new Text(line));
    }
}
