package com.xiguanlezz.cn.course4;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.util.CharArraySet;

/**
 * @Author Chen Jie
 * @Date 2024/10/20 9:17
 * @Description Custom Lucene analyzer that tokenizes on letters, lowercases
 *              tokens, and removes stop words from a configurable stop-word set.
 * @Version 1.0
 */
public class StopAnalyzerFlawed extends Analyzer {
    /**
     * 停用词集合 (stop-word set applied to the token stream)
     */
    private CharArraySet stopWords;

    /**
     * Creates an analyzer using Lucene's built-in English stop-word set.
     */
    public StopAnalyzerFlawed() {
        stopWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
    }

    /**
     * Creates an analyzer using a caller-supplied stop-word list.
     *
     * @param stopWords words to remove from the token stream
     */
    public StopAnalyzerFlawed(String[] stopWords) {
        this.stopWords = StopFilter.makeStopSet(stopWords);
    }


    /**
     * Builds the analysis chain: letter tokenizer → lowercase → stop filter.
     *
     * @param fieldName name of the field being analyzed (unused here)
     * @return the tokenizer/filter chain for this analyzer
     */
    @Override
    protected Analyzer.TokenStreamComponents createComponents(String fieldName) {
        // 按照非字母拆分的Tokenizer (tokenizer that splits on non-letter chars)
        Tokenizer source = new LetterTokenizer();
        // BUG FIX: lowercase BEFORE stop filtering. The stop-word set contains
        // lowercase entries, so capitalized stop words (e.g. "The", "And") would
        // slip past a StopFilter applied to the raw, un-lowercased tokens.
        LowerCaseFilter lcFilter = new LowerCaseFilter(source);
        // 按照停用词列表移除被停用的语汇单元 (remove stop words after normalization)
        StopFilter stopFilter = new StopFilter(lcFilter, this.stopWords);

        return new Analyzer.TokenStreamComponents(source, stopFilter);
    }
}
