package com.bigdata.assignment.problem3;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.Locale;
import java.util.regex.Pattern;

/**
 * Mapper for an optimized word-count job.
 *
 * <p>Normalizes each input line to lowercase, replaces every character that is
 * not an ASCII letter or whitespace with a space, splits on whitespace, and
 * emits a {@code (word, 1)} pair per token. The {@link Text} key and the
 * {@link IntWritable} value are reused across calls to avoid per-record
 * allocations. Custom counters track input lines, emitted words, and
 * invalid (null) lines.
 */
public class WordCountOptimizedMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reused across map() invocations to avoid allocating per record.
    private final Text word = new Text();
    private static final IntWritable ONE = new IntWritable(1);

    // Compiled once per JVM. String.replaceAll/split with a String pattern
    // would recompile the regex on every input line of the dataset.
    private static final Pattern NON_LETTERS = Pattern.compile("[^a-z\\s]");
    private static final Pattern WHITESPACE = Pattern.compile("\\s+");

    /** Custom job counters, reported alongside the built-in MapReduce counters. */
    public enum COUNTERS {
        INPUT_LINES,
        CLEANED_WORDS,
        INVALID_LINES
    }

    /**
     * Emits {@code (token, 1)} for every alphabetic token in the input line.
     *
     * @param key     byte offset of the line within the input split (unused)
     * @param value   the raw line of text; a {@code null} value is counted as
     *                an invalid line and skipped
     * @param context Hadoop context used to emit pairs and update counters
     * @throws IOException          if the framework fails to write output
     * @throws InterruptedException if the task is interrupted
     */
    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

        context.getCounter(COUNTERS.INPUT_LINES).increment(1);

        if (value == null) {
            context.getCounter(COUNTERS.INVALID_LINES).increment(1);
            return;
        }

        // Locale.ROOT keeps lowercasing deterministic regardless of each
        // cluster node's default locale (e.g. the Turkish dotless-i problem).
        String line = NON_LETTERS
                .matcher(value.toString().toLowerCase(Locale.ROOT))
                .replaceAll(" ");

        for (String token : WHITESPACE.split(line)) {
            if (token.isEmpty()) {
                continue;
            }
            word.set(token);
            context.write(word, ONE);
            context.getCounter(COUNTERS.CLEANED_WORDS).increment(1);
        }
    }
}
