package tianwang.services.integrate;

import java.io.BufferedReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import edu.stanford.nlp.ling.Word;
import edu.stanford.nlp.objectbank.TokenizerFactory;
import edu.stanford.nlp.process.Tokenizer;
import edu.stanford.nlp.process.WhitespaceTokenizer;
import edu.stanford.nlp.process.PTBTokenizer.PTBTokenizerFactory;
/**
 * Tokenizes text with Stanford's tokenizers, without POS tagging.
 *
 * <p>Can be applied to whole texts, not just single sentences.
 */
public class StanfordWordSegmentor {

	/**
	 * Tokenizes the given text with Stanford's Penn Treebank (PTB) tokenizer.
	 *
	 * @param sentence the text to tokenize
	 * @return the token strings in order of appearance
	 */
	public List<String> segregateWithPTBTokenizer(String sentence) {
		return collectTokens(PTBTokenizerFactory.newTokenizerFactory(), sentence);
	}

	/**
	 * Tokenizes the given text by splitting on whitespace only.
	 *
	 * @param sentence the text to tokenize
	 * @return the token strings in order of appearance
	 */
	public List<String> segregateWithWhitespace(String sentence) {
		// Raw factory construction mirrors the library's API at this version;
		// the unchecked conversion to TokenizerFactory<Word> is benign here.
		TokenizerFactory<Word> factory = new WhitespaceTokenizer.WhitespaceTokenizerFactory();
		return collectTokens(factory, sentence);
	}

	/**
	 * Runs the given tokenizer factory over the text and collects each token's
	 * surface string. Shared by both public entry points to avoid duplication.
	 *
	 * @param factory  tokenizer factory to build the tokenizer from
	 * @param sentence the text to tokenize
	 * @return the token strings in order of appearance
	 */
	private List<String> collectTokens(TokenizerFactory<Word> factory, String sentence) {
		Tokenizer<Word> tokenizer = factory.getTokenizer(new BufferedReader(new StringReader(sentence)));

		List<String> result = new ArrayList<String>();
		while (tokenizer.hasNext()) {
			result.add(tokenizer.next().word());
		}
		return result;
	}

}
