package hadoop.test;

import com.chenlb.mmseg4j.*;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.io.StringReader;


public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    /** Reused output value — every token is counted once. */
    private static final IntWritable ONE = new IntWritable(1);

    /** Reused output key, to avoid allocating a new Text per emitted token. */
    private final Text outKey = new Text();

    /** mmseg4j segmenter, built once per task in setup() and reused for every record. */
    private Seg seg;

    /*
     * setup() is invoked once per map task before any map() calls.
     * Loading the mmseg4j dictionary is expensive, so it is done here
     * rather than inside map(), which previously repeated the work for
     * every single input line.
     */
    @Override
    protected void setup(Context context) {
        seg = new ComplexSeg(Dictionary.getInstance());
    }

    /*
     * map() is invoked by the map task once per line of input text.
     * Parameters supplied by the framework:
     *      key   - the byte offset of the line within the split (LongWritable)
     *      value - the text content of the line (Text)
     * Segments the line with mmseg4j and emits (token, 1) for each word found.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        MMSeg mmSeg = new MMSeg(new StringReader(value.toString()), seg);

        Word word;
        while ((word = mmSeg.next()) != null) {
            // Word.getString() returns just the token text; toString() is not
            // guaranteed to be limited to the token (may include offset/type info).
            outKey.set(word.getString());
            context.write(outKey, ONE);
        }
    }
}
