package com.maxbill.hadoop.reduce;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.*;

/**
 * Map stage of the word-count job: segments each input line with the IK
 * Chinese analyzer and emits (word, 1) pairs for the reducer to sum.
 *
 * @author zuoshuai (MaxBill)
 * @since 2017/11/17
 */
public class MyMap extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    /**
     * @作者 zuoshuai(MaxBill)
     * @日期 2017/11/17
     * @时间 14:46
     */
    public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
        //未使用分词器
        //String line = value.toString();
        //StringTokenizer tokenizer = new StringTokenizer(line);
        // hile (tokenizer.hasMoreTokens()) {
        //word.set(tokenizer.nextToken());
        //output.collect(word, one);
        //}
        //使用分词器
        byte[] btValue = value.getBytes();
        InputStream ip = new ByteArrayInputStream(btValue);
        Reader reader = new InputStreamReader(ip);
        IKSegmenter iks = new IKSegmenter(reader, true);
        Lexeme lexeme;
        while ((lexeme = iks.next()) != null) {
            //打印全部分词
            //System.err.println(lexeme.getLexemeText());
            word.set(lexeme.getLexemeText());
            output.collect(word, one);
        }
    }

}