package org.wj.config.wordcount;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;

public class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, LongWritable> {

    /**
     * Reused output value — always 1 for word count. {@code collect} serializes
     * the value at call time, so a single shared instance is safe and avoids
     * one allocation per emitted word.
     */
    private static final LongWritable ONE = new LongWritable(1);

    /** Reused output key; contents are overwritten via {@link Text#set} per word. */
    private final Text outKey = new Text();

    /**
     * Tokenizes one line of input text and emits a {@code (word, 1)} pair for
     * every whitespace-separated token.
     *
     * <p>The line is split on runs of whitespace ({@code \s+}) rather than a
     * single literal space, so tabs and consecutive spaces do not yield bogus
     * empty-string tokens. Any empty token (from leading whitespace or a blank
     * line) is skipped rather than counted.</p>
     *
     * @param key      byte offset of the line within the input split (unused).
     * @param value    the line of text to tokenize.
     * @param output   collects the emitted {@code (word, 1)} pairs.
     * @param reporter facility to report progress (unused; tokenizing a single
     *                 line is fast enough that no liveness reporting is needed).
     * @throws IOException if the underlying collector fails to write.
     */
    @Override
    public void map(LongWritable key, Text value, OutputCollector<Text, LongWritable> output, Reporter reporter) throws IOException {
        // \s+ handles tabs and multiple spaces; split(" ") would emit ""
        // between consecutive spaces and count empty words.
        String[] words = value.toString().split("\\s+");
        for (String word : words) {
            if (word.isEmpty()) {
                // split("\\s+") yields one empty leading token when the line
                // starts with whitespace; never emit it.
                continue;
            }
            outKey.set(word);
            output.collect(outKey, ONE);
        }
    }
}
