package org.liuyuantao.lucene.analyzer;

import java.io.IOException;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.util.CharArraySet;

/**
 * An {@link Analyzer} that tokenizes on letters, lower-cases the tokens, and
 * removes stop words. The stop set is either the caller-supplied word list
 * merged with Lucene's English defaults, or the English defaults alone.
 */
public class MyStopAnalyzer extends Analyzer {

    /**
     * Case-insensitive stop set, built once at construction time so
     * {@link #createComponents(String)} does not copy it on every call.
     */
    private final CharArraySet stops;

    /**
     * Builds an analyzer whose stop set is {@code sws} plus Lucene's default
     * English stop words. Matching is case-insensitive.
     *
     * @param sws additional stop words to filter out
     */
    public MyStopAnalyzer(String[] sws) {
        // makeStopSet converts the array into a CharArraySet (ignoreCase=true).
        CharArraySet set = StopFilter.makeStopSet(sws, true);
        // Merge in the built-in English stop words.
        set.addAll(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
        stops = set;
    }

    /**
     * Builds an analyzer using only Lucene's default English stop words.
     */
    public MyStopAnalyzer() {
        // Copy the (unmodifiable) default set into a case-insensitive set once,
        // instead of re-copying it for every token stream.
        stops = new CharArraySet(StopAnalyzer.ENGLISH_STOP_WORDS_SET, true);
    }

    /**
     * Builds the tokenizer/filter chain for this analyzer:
     * LetterTokenizer -&gt; LowerCaseFilter -&gt; StopFilter.
     *
     * @param fieldName the field being analyzed (unused; same chain for all fields)
     * @return the components wrapping the tokenizer and the final filter
     */
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        // LetterTokenizer with the default attribute factory; the previous
        // StandardTokenizer existed only to supply that same default factory
        // and was never used or closed.
        LetterTokenizer tokenizer = new LetterTokenizer();
        LowerCaseFilter lowerCaseFilter = new LowerCaseFilter(tokenizer);
        StopFilter stopFilter = new StopFilter(lowerCaseFilter, stops);
        // Pair the source tokenizer with the end of the filter chain.
        return new TokenStreamComponents(tokenizer, stopFilter);
    }
}
