package com.maomi.coder;

import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

/**
 * Created by lucky on 2019-03-29.
 */
/**
 * Demonstrates a custom Lucene {@link Analyzer} that lower-cases input tokens
 * and removes stop words. By default it uses Lucene's English stop-word set;
 * the list constructor extends that set with caller-supplied words.
 */
public class StopAnalyzerDemo extends Analyzer {

    // Stop words removed from the token stream by createComponents().
    private CharArraySet stopWordSet;

    /**
     * Creates an analyzer whose stop-word set is the default English set
     * plus the supplied additional words.
     *
     * @param stopWords extra stop words to filter out; must not be {@code null}
     */
    public StopAnalyzerDemo(List<String> stopWords) {
        this();
        // ENGLISH_STOP_WORDS_SET is unmodifiable — copy it before adding to it.
        stopWordSet = CharArraySet.copy(getStopWordSet());
        stopWordSet.addAll(StopFilter.makeStopSet(stopWords));
    }

    /** Creates an analyzer that filters only the default English stop words. */
    public StopAnalyzerDemo() {
        super();
        setStopWordSet(StopAnalyzer.ENGLISH_STOP_WORDS_SET);
    }

    /** Replaces the stop-word set used by subsequently created token streams. */
    public void setStopWordSet(CharArraySet stopWordSet) {
        this.stopWordSet = stopWordSet;
    }

    /** Returns the stop-word set currently in use. */
    public CharArraySet getStopWordSet() {
        return this.stopWordSet;
    }

    /**
     * Builds the analysis chain: a lower-casing tokenizer whose output is
     * passed through a stop-word filter backed by {@link #stopWordSet}.
     *
     * @param s the field name being analyzed (unused by this chain)
     * @return the tokenizer/filter pair for this analyzer
     */
    @Override
    protected TokenStreamComponents createComponents(String s) {
        Tokenizer source = new LowerCaseTokenizer();

        return new TokenStreamComponents(source, new StopFilter(source, stopWordSet));
    }

    /**
     * Demo entry point: tokenizes a sample sentence and prints every token
     * that survives stop-word filtering.
     *
     * @param args unused
     * @throws IOException if the token stream fails while being consumed
     */
    public static void main(String[] args) throws IOException {
        String text = "我 和 你 心连心,哈哈哈哈哈 是 笨蛋。";

        List<String> list = Arrays.asList("和", "是", "你", "我", "他", "她", "笨蛋");

        // Analyzer and TokenStream are both Closeable; the original leaked
        // them — try-with-resources releases the underlying resources.
        try (Analyzer analyzer = new StopAnalyzerDemo(list);
             TokenStream tokenStream = analyzer.tokenStream("text", text)) {

            CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);

            tokenStream.reset();

            while (tokenStream.incrementToken()) {
                System.out.println(charTermAttribute);
            }
            // TokenStream contract: end() must be called after the last
            // incrementToken() and before close().
            tokenStream.end();
        }
    }

}
