package org.nlp.algo.classifier;

import java.io.CharArrayReader;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.Stack;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import com.aliasi.tokenizer.Tokenizer;

/**
 * Adapts a Lucene {@code Analyzer} to LingPipe's {@code Tokenizer} interface:
 * the input characters are analyzed eagerly in the constructor and the
 * resulting terms are handed out one at a time by {@code nextToken()}.
 * 
 * @author longkeyy
 * 
 */
public class LuceneTokenizer extends Tokenizer {

	// Instance field, not static: the original static Stack was shared by
	// every LuceneTokenizer instance (and thread), so concurrent or repeated
	// construction interleaved tokens from different inputs. A FIFO queue
	// also preserves the analyzer's token order, whereas Stack.pop() emitted
	// the tokens in reverse.
	private final Queue<String> wordDocs = new ArrayDeque<String>();

	/**
	 * Tokenizes {@code input} eagerly with the given Lucene analyzer and
	 * buffers the resulting terms for retrieval via {@link #nextToken()}.
	 * Analysis errors are logged and the tokenizer yields whatever tokens
	 * were read before the failure (possibly none), matching the original
	 * never-throws constructor contract.
	 *
	 * @param input    characters to tokenize
	 * @param analyzer Lucene analyzer used to produce the token stream
	 */
	public LuceneTokenizer(char[] input, Analyzer analyzer) {
		TokenStream token = null;
		try {
			token = analyzer.tokenStream("", new CharArrayReader(input));
			CharTermAttribute term = token
					.addAttribute(CharTermAttribute.class);
			// Lucene TokenStream contract: reset() before the first
			// incrementToken(), end() after the last one, close() always.
			token.reset();
			while (token.incrementToken()) {
				wordDocs.add(term.toString());
			}
			token.end();
		} catch (IOException e) {
			// Best-effort: keep the constructor non-throwing as before.
			e.printStackTrace();
		} finally {
			if (token != null) {
				try {
					token.close();
				} catch (IOException ignored) {
					// nothing useful to do if close itself fails
				}
			}
		}
	}

	/**
	 * Returns the next buffered token in original text order, or
	 * {@code null} once all tokens are consumed (LingPipe's end-of-stream
	 * convention).
	 */
	@Override
	public String nextToken() {
		return wordDocs.poll();
	}

}
