package sharewithus.analyzer.lucene;

import java.io.Reader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;

/**
 * Lucene {@link Analyzer} implementation backed by the SU tokenizer.
 *
 * <p>By default the analyzer uses the standard (bigram-frequency) segmentation
 * mode; pass {@code true} to {@link #SUAnalyzer(boolean)} or
 * {@link #setMaxWordLength(boolean)} to switch to maximum-word-length
 * segmentation instead.
 */
public final class SUAnalyzer extends Analyzer {

	// Segmentation mode flag: true = maximum-word-length segmentation,
	// false = default bigram-frequency segmentation.
	private boolean isMaxWordLength;

	/**
	 * Creates an analyzer using the default (bigram-frequency) segmentation mode.
	 */
	public SUAnalyzer() {
		this(false);
	}

	/**
	 * Creates an analyzer with an explicit segmentation mode.
	 *
	 * @param isMaxWordLength when {@code true}, the tokenizer performs
	 *        maximum-word-length segmentation
	 */
	public SUAnalyzer(boolean isMaxWordLength) {
		// Direct assignment is equivalent to calling the setter; the class is
		// final, so no overriding concerns apply.
		this.isMaxWordLength = isMaxWordLength;
	}

	/**
	 * Switches the segmentation mode used by tokenizers created after this call.
	 *
	 * @param isMaxWordLength {@code true} to enable maximum-word-length
	 *        segmentation
	 */
	public void setMaxWordLength(boolean isMaxWordLength) {
		this.isMaxWordLength = isMaxWordLength;
	}

	/**
	 * Reports whether maximum-word-length segmentation is enabled.
	 *
	 * @return {@code true} if maximum-word-length segmentation is active
	 */
	public boolean isMaxWordLength() {
		return isMaxWordLength;
	}

	/**
	 * Returns a new {@link SUTokenizer} over {@code reader}, configured with the
	 * analyzer's current segmentation mode.
	 *
	 * @see org.apache.lucene.analysis.Analyzer#tokenStream(java.lang.String,
	 *      java.io.Reader)
	 */
	@Override
	public TokenStream tokenStream(String fieldName, Reader reader) {
		return new SUTokenizer(reader, isMaxWordLength());
	}
}
