package cn.ysnk.clz.util;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

/**
 * Utility wrapper around the IK Analyzer segmenter that splits a Chinese
 * text string into a list of keyword tokens.
 */
public class IKSplitWord {

	/**
	 * Splits {@code str} into word tokens using IK's smart (coarse-grained)
	 * segmentation mode.
	 *
	 * <p>If segmentation fails mid-stream, the tokens collected so far are
	 * returned (best-effort, matching the original behavior).
	 *
	 * @param str the text to segment; {@code null} or empty yields an empty list
	 * @return a mutable list of non-empty tokens, never {@code null}
	 */
	public static List<String> splitWord(String str){
		List<String> keywordList = new ArrayList<String>();
		if (str == null || str.isEmpty()) {
			return keywordList;
		}
		// Read the String directly: the original encoded to bytes with the
		// platform default charset and decoded back, which is lossy for any
		// character outside that charset. StringReader avoids the round-trip.
		try (Reader read = new StringReader(str)) {
			// true enables smart segmentation mode; false (the default) is
			// fine-grained segmentation.
			IKSegmenter iks = new IKSegmenter(read, true);
			Lexeme t;
			while ((t = iks.next()) != null) {
				if (t.getLexemeText().length() > 0) {
					keywordList.add(t.getLexemeText());
				}
			}
		} catch (IOException e) {
			// Best-effort: keep whatever was segmented before the failure.
			e.printStackTrace();
		}
		return keywordList;
	}

}
