package com.shujia.wyh.mrdemos;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.IOException;
import java.io.StringReader;

/**
 * Mapper that emits one record per input line.
 *
 * <p>For each line it writes a key of the form
 * {@code 当前是第【<key>】行，该行的内容是：【<line>】} (a diagnostic string embedding
 * the input key and the raw line content) with a constant count of {@code 1}.
 *
 * <p>NOTE(review): with the default {@code TextInputFormat} the incoming
 * {@code LongWritable} key is the line's <em>byte offset</em>, not a line
 * number, even though the emitted message calls it a "行" (line) — confirm
 * against the job's input format if the wording matters.
 */
public class MyMapper2 extends Mapper<LongWritable, Text, Text, LongWritable> {

    // Reused across map() invocations to avoid allocating a new Writable
    // object per input record (standard Hadoop mapper optimization; the
    // framework serializes the values before the next call, so reuse is safe).
    private final Text outKey = new Text();
    private final LongWritable one = new LongWritable(1L);

    /**
     * Emits {@code (diagnostic-string, 1)} for the current input record.
     *
     * @param key     input key supplied by the InputFormat (byte offset under
     *                {@code TextInputFormat})
     * @param value   the content of the current line
     * @param context Hadoop context used to emit the output pair
     * @throws IOException          if the framework fails to write the output
     * @throws InterruptedException if the task is interrupted while writing
     */
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        outKey.set("当前是第【" + key + "】行，该行的内容是：【" + value + "】");
        context.write(outKey, one);
    }
}
