package sharewithus.analyzer.solr;

import java.io.Reader;
import java.util.Map;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.solr.analysis.BaseTokenizerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import sharewithus.analyzer.lucene.SUTokenizer;

/**
 * Solr 1.4 tokenizer-factory implementation backed by {@link SUTokenizer}.
 * <p>
 * Configured from the field-type definition in {@code schema.xml}; the single
 * supported attribute is {@code isMaxWordLength}.
 * 
 * @author terry
 * 
 */
public class SUTokenizerFactory extends BaseTokenizerFactory {

	public static final Logger log = LoggerFactory
			.getLogger(SUTokenizerFactory.class);

	// Segmentation mode passed straight to SUTokenizer. NOTE(review): presumably
	// true selects maximum-word-length (coarse) segmentation — confirm against
	// SUTokenizer's constructor contract.
	private boolean isMaxWordLength = false;

	/**
	 * Creates a factory using the default segmentation mode
	 * ({@code isMaxWordLength = false}, i.e. the original's
	 * "default bigram word-frequency segmentation").
	 */
	public SUTokenizerFactory() {
	}

	/**
	 * Initializes the factory from schema configuration. Reads the optional
	 * {@code isMaxWordLength} attribute, defaulting to {@code false} when it
	 * is absent.
	 * 
	 * @param args attribute map supplied by Solr from the field-type definition
	 * 
	 * @see org.apache.solr.analysis.BaseTokenizerFactory#init(java.util.Map)
	 */
	public void init(Map<String, String> args) {
		super.init(args);
		isMaxWordLength = getBoolean("isMaxWordLength", false);
		// Parameterized logging: no string concatenation when INFO is disabled.
		log.info("isMaxWordLength : {}", isMaxWordLength);
	}

	/**
	 * Creates a new {@link SUTokenizer} over the given character stream using
	 * the currently configured segmentation mode.
	 * 
	 * @param reader the character stream to tokenize
	 * @return a new tokenizer instance for this stream
	 * 
	 * @see org.apache.solr.analysis.TokenizerFactory#create(java.io.Reader)
	 */
	public Tokenizer create(Reader reader) {
		return new SUTokenizer(reader, isMaxWordLength());
	}

	/**
	 * Sets the segmentation mode used by tokenizers created after this call.
	 * 
	 * @param isMaxWordLength the new segmentation mode flag
	 */
	public void setMaxWordLength(boolean isMaxWordLength) {
		this.isMaxWordLength = isMaxWordLength;
	}

	/**
	 * Returns the currently configured segmentation mode flag.
	 * 
	 * @return {@code true} if maximum-word-length segmentation is enabled
	 */
	public boolean isMaxWordLength() {
		return isMaxWordLength;
	}

}
