package cn.sumpu.toolkit.fastext.seg;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.dic.Dictionary;
import org.wltea.analyzer.lucene.IKTokenizer;

/**
 * Chinese word segmentation helper built on the IK Analyzer's Lucene tokenizer.
 */
public class Segmentation {

	/**
	 * Segments the given text into tokens using IK fine-grained mode
	 * ({@code useSmart = false}).
	 *
	 * @param text the text to segment; must not be {@code null}
	 * @return the tokens in stream order; possibly partial if an I/O error
	 *         occurs mid-stream (the error is printed, matching the original
	 *         best-effort behavior)
	 */
	public static List<String> analyze(String text) {
		List<String> ret = new ArrayList<String>();
		// Tokenizer wraps the StringReader; it must be closed to release
		// the stream (the original version leaked it).
		IKTokenizer tokenizer = new IKTokenizer(new StringReader(text), false);
		try {
			// Lucene attribute instances are per-stream singletons, so fetch
			// the term attribute once instead of on every iteration.
			CharTermAttribute termAtt = tokenizer.getAttribute(CharTermAttribute.class);
			while (tokenizer.incrementToken()) {
				ret.add(termAtt.toString());
			}
			// Per the TokenStream contract, signal end-of-stream consumption.
			tokenizer.end();
		} catch (IOException e) {
			// Unexpected for an in-memory StringReader; keep the original
			// best-effort behavior and return whatever was tokenized so far.
			e.printStackTrace();
		} finally {
			try {
				tokenizer.close();
			} catch (IOException ignored) {
				// Nothing useful to do if closing an in-memory stream fails.
			}
		}
		return ret;
	}

	/**
	 * Demo entry point: registers a few extension words with the IK
	 * dictionary, segments a sample sentence, and prints one token per line.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		List<String> extWords = new ArrayList<String>();
		extWords.add("这是个网络新词必须要分成一个词");
		extWords.add("这也要分成一个新词");
		extWords.add("姐与天宫");
		extWords.add("内要");
		Dictionary.loadExtendWords(extWords);
		List<String> ret = Segmentation.analyze("hold姐与天宫交会对接这是个网络新词必须要分成一个词这也要分成一个新词：神舟八号5公里内要4次胡锦涛“刹车”");
		for (String str : ret) {
			System.out.println(str);
		}
	}
}
