package com.hw.mapreduce.service;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.List;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.JiebaSegmenter.SegMode;
import com.huaban.analysis.jieba.SegToken;
/**
 * @author hw
 * 
 * @Map阶段
 * 
 * 输入：KEY-LongWritable，VALUE-Text
 * 
 * 输出：KEY-Text，VALUE-LongWritable
 */
public class WordMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
	
	private static Logger log=LoggerFactory.getLogger(WordMapper.class);
	private final static LongWritable one = new LongWritable(1);
	private Text word = new Text();
	
//	MapTask 会对输入的每一行数据调用map()方法进行处理
	/**
	 * @param key
	 * 默认情况下，是mapreduce所读取到的一行文本的起始偏移量
	 * @param value
	 * 默认情况下，是mapreduce所读取到的一行文本的内容，hadoop中的序列化类型为Text
	 * @param context
	 * 是用户自定义逻辑处理完成后输出类定义的KEY，VALUE，交给后续的produce处理
	 * @throws InterruptedException 
	 * @throws IOException 
	 */
	public void map(LongWritable key,Text value,Context context) throws IOException, InterruptedException{
		
//		防止中文乱码
		String sentence = new String(value.getBytes(), 0, value.getLength(), "UTF-8").trim();
		
		if (StringUtils.isNotEmpty(sentence)) {
			// 使用jieba分词器
			JiebaSegmenter segmenter = new JiebaSegmenter();
	        List<SegToken> tokens=segmenter.process(sentence, SegMode.SEARCH);
	        for(SegToken token:tokens){
	        	log.info(token.word + ":开始位置"+token.startOffset);
	        	word.set(token.word);
	        	context.write(word, one);
	        }
		}
	}

}
