package mygroup.test.hadooptest;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.naming.Context;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.wltea.analyzer.core.IKSegmenter;


public class MyMap<Lexeme> extends Mapper<Object,Text,Text,IntWritable>{
	
	private static final IntWritable one =  new IntWritable(1);  
    
    private Text word = new Text();
    String regex="[\u4e00-\u9fa5]+";  

    protected void map(Object key, Text value, Context context)  
            throws IOException, InterruptedException {  
        String line = value.toString();    
        boolean isc = isContainChinese(line); 
        
        if(isc){
        	line=line.replaceAll("[^\\u4e00-\\u9fa5]", "");
        	ArrayList<String> list = IK(line);
        	Iterator it = list.iterator();
        	while(it.hasNext()){
        		context.write((Text) it.next(), one);
        	}
        }
    }
    
    public boolean isContainChinese(String str){
    	Pattern p = Pattern.compile(regex); 
        Matcher m = p.matcher(str);  
        if (m.find()) {
            return true;
        }
        return false;
    }
    
    public ArrayList<String> IK(String str){
    	ArrayList<String> list = new ArrayList<String>();
    	try {
    		byte[] bt = str.getBytes(); 
        	InputStream is = new ByteArrayInputStream(bt);
        	InputStreamReader reader = new InputStreamReader(is); 
        	IKSegmenter ik = new IKSegmenter(reader,true); 
        	org.wltea.analyzer.core.Lexeme lexeme ;
    		while((lexeme = ik.next())!=null){
    			String text = lexeme.getLexemeText(); 
    			list.add(text);
    		}
    	} catch (IOException e) {
    		e.printStackTrace();
    	}
    	return list;
    }
}
