package org.mspring.mlog.support.lucene;

import java.io.IOException;
import java.io.Reader;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;

/**
 * ANSJ-based analyzer (Chinese word segmentation).
 * 
 * @author Gao Youbo
 * @since 2013-07-09
 */
public class ANSJAnalyzer extends Analyzer {
    // Stop-word set handed to each tokenizer; left public to preserve the original API.
    public Set<String> filter;
    // Whether the tokenizer should perform stemming analysis.
    private boolean pstemming = true;

    /** Creates an analyzer with no stop words and stemming enabled. */
    public ANSJAnalyzer() {
    }

    /**
     * Creates an analyzer with the given configuration.
     * 
     * @param filter
     *            stop words to be filtered from the token stream
     * @param pstemming
     *            whether to perform stemming analysis
     */
    public ANSJAnalyzer(Set<String> filter, boolean pstemming) {
        this.filter = filter;
        this.pstemming = pstemming;
    }

    /**
     * Builds a fresh ANSJ tokenizer over the given reader.
     * 
     * @param fieldName
     *            ignored; all fields are tokenized the same way
     * @param reader
     *            source of the text to tokenize
     */
    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
        return new ANSJTokenizer(reader, filter, pstemming);
    }

    /**
     * Reuses the per-thread cached tokenizer when one exists (resetting it to
     * the new reader); otherwise creates one and caches it for later calls.
     * 
     * @param fieldName
     *            ignored; all fields are tokenized the same way
     * @param reader
     *            source of the text to tokenize
     * @throws IOException
     *             if resetting the cached tokenizer fails
     */
    @Override
    public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
        Tokenizer cached = (Tokenizer) getPreviousTokenStream();
        if (cached != null) {
            cached.reset(reader);
            return cached;
        }
        Tokenizer created = new ANSJTokenizer(reader, this.filter, this.pstemming);
        setPreviousTokenStream(created);
        return created;
    }
}
