package org.study.lucene.api.analyzer;

import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.cn.smart.HMMChineseTokenizer;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.tokenattributes.*;
import org.apache.lucene.util.IOUtils;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * SmartChineseAnalyzer 测试
 *
 * @author Administrator
 * @date 2021-12-29
 */
public class SmartChineseAnalyzerTest {

    /** Classpath resource containing the custom stop words, one entry per line. */
    private static final String STOPWORD_FILE = "stopwords.txt";

    /** Prefix that marks comment lines inside the stop word file. */
    private static final String STOPWORD_FILE_COMMENT = "//";

    public static void main(String[] args) throws IOException {
        String english = "Analysis is one of the main causes of slow indexing. Simply put, the more you analyze the slower analyze the indexing (in most cases).";
        //String chinese = "K8s 和 YARN 都不够好，全面解析 Facebook 自研流处理服务管理平台";
        //String chinese = "极客时间学习心得：用分类和聚焦全面夯实技术认知";
        String chinese = "交易中台架构设计：海量并发的高扩展，新业务秒级接入";
        testSmartChineseAnalyzer(english);
        testSmartChineseAnalyzer(chinese);
        testSmartChineseAnalyzerByCustom(english);
        testSmartChineseAnalyzerByCustom(chinese);
        testMySmartChineseAnalyzer(english);
        testMySmartChineseAnalyzer(chinese);
    }

    /**
     * Tokenizes {@code text} with the stock {@link SmartChineseAnalyzer} (default stop words)
     * and prints the token offsets. 参考 https://zhuanlan.zhihu.com/p/143200104
     *
     * @param text the sentence to analyze
     * @throws IOException if the token stream fails
     */
    public static void testSmartChineseAnalyzer(String text) throws IOException {
        System.out.println("【使用 SmartChineseAnalyzer】原语句：" + text);
        try (Analyzer analyzer = new SmartChineseAnalyzer()) {
            System.out.println("【使用 SmartChineseAnalyzer】分词结果：" + tokenize(analyzer, text));
        }
    }

    /**
     * Tokenizes {@code text} with a {@link SmartChineseAnalyzer} configured with the
     * custom stop word list from {@value #STOPWORD_FILE}. 参考 https://zhuanlan.zhihu.com/p/143200104
     *
     * @param text the sentence to analyze
     * @throws IOException if the stop word file cannot be read or the token stream fails
     */
    public static void testSmartChineseAnalyzerByCustom(String text) throws IOException {
        System.out.println("【使用 SmartChineseAnalyzer】原语句：" + text);
        try (Analyzer analyzer = new SmartChineseAnalyzer(loadStopWords())) {
            System.out.println("【使用 SmartChineseAnalyzer】分词结果：" + tokenize(analyzer, text));
        }
    }

    /**
     * Tokenizes {@code text} with {@link MySmartChineseAnalyzer}, which adds an
     * extend-word filter on top of the custom stop words. 参考 https://zhuanlan.zhihu.com/p/143200104
     *
     * @param text the sentence to analyze
     * @throws IOException if the stop word file cannot be read or the token stream fails
     */
    public static void testMySmartChineseAnalyzer(String text) throws IOException {
        System.out.println("【使用自定义 SmartChineseAnalyzer】原语句：" + text);
        List<String> words = Collections.singletonList("中台");
        try (Analyzer analyzer = new MySmartChineseAnalyzer(loadStopWords(), words)) {
            System.out.println("【使用自定义 SmartChineseAnalyzer】分词结果：" + tokenize(analyzer, text));
        }
    }

    /**
     * Loads the custom stop word set from the classpath.
     *
     * <p>Previously the stream/reader were never closed and a missing resource caused a bare
     * NullPointerException; both are fixed here.
     *
     * @return an unmodifiable stop word set
     * @throws IOException if the resource is missing or unreadable
     */
    private static CharArraySet loadStopWords() throws IOException {
        InputStream inputStream = SmartChineseAnalyzerTest.class.getClassLoader().getResourceAsStream(STOPWORD_FILE);
        if (inputStream == null) {
            throw new FileNotFoundException("classpath resource not found: " + STOPWORD_FILE);
        }
        try (Reader reader = IOUtils.getDecodingReader(inputStream, StandardCharsets.UTF_8)) {
            return CharArraySet.unmodifiableSet(WordlistLoader.getWordSet(reader, STOPWORD_FILE_COMMENT));
        }
    }

    /**
     * Runs {@code text} through {@code analyzer} and collects each token's offset
     * representation ({@link OffsetAttribute#toString()}), honoring the
     * reset / incrementToken / end / close TokenStream contract.
     *
     * @param analyzer the analyzer to drive
     * @param text     the input sentence
     * @return token offset strings in stream order
     * @throws IOException if the token stream fails
     */
    private static List<String> tokenize(Analyzer analyzer, String text) throws IOException {
        try (TokenStream tokenStream = analyzer.tokenStream("testField", text)) {
            OffsetAttribute offsetAttribute = tokenStream.addAttribute(OffsetAttribute.class);
            List<String> tokens = new ArrayList<>();
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                tokens.add(offsetAttribute.toString());
            }
            tokenStream.end();
            return tokens;
        }
    }
}

/**
 * A SmartChineseAnalyzer-like analyzer with an extra extend-word stage:
 * HMM Chinese tokenization, lower-casing, Porter stemming, optional stop word
 * removal, and an {@link ExtendWordFilter} driven by a caller-supplied word list.
 */
class MySmartChineseAnalyzer extends Analyzer {
    // Supplied once at construction and never mutated, so the analyzer is
    // immutable and safe to share across fields/threads (fields made final).
    private final List<String> words;
    private final CharArraySet stopWords;

    /**
     * @param stopWords tokens to drop; may be empty to disable stop filtering
     * @param words     extend words fed to {@link ExtendWordFilter}; may be empty to disable it
     */
    public MySmartChineseAnalyzer(CharArraySet stopWords, List<String> words) {
        this.stopWords = Objects.requireNonNull(stopWords, "stopWords");
        this.words = Objects.requireNonNull(words, "words");
    }

    @Override
    public Analyzer.TokenStreamComponents createComponents(String fieldName) {
        final Tokenizer tokenizer = new HMMChineseTokenizer();
        TokenStream result = tokenizer;
        result = new LowerCaseFilter(result);
        result = new PorterStemFilter(result);
        // Both filters are optional: skip them entirely when configured empty.
        if (!stopWords.isEmpty()) {
            result = new StopFilter(result, stopWords);
        }
        if (!words.isEmpty()) {
            result = new ExtendWordFilter(result, words);
        }
        return new TokenStreamComponents(tokenizer, result);
    }

    @Override
    protected TokenStream normalize(String fieldName, TokenStream in) {
        // Query-time normalization: only lower-case, mirroring the index chain's first filter.
        return new LowerCaseFilter(in);
    }
}

/**
 * Keeps only tokens that are substrings of one of the configured extend words,
 * accumulating their lengths; when the accumulated length reaches the full
 * length of the matched extend word, the current token's term is rewritten to
 * that whole word. Dropped tokens are accounted for via position increments.
 *
 * <p>NOTE(review): matching is length-based only — it assumes consecutive
 * matching tokens belong to the same extend word; confirm this is acceptable
 * for the intended demo inputs.
 */
class ExtendWordFilter extends TokenFilter {
    // Sum of lengths of the consecutive tokens matched so far against an extend word.
    private int hadMatchedWordLength = 0;
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
    private final List<String> extendWords;

    public ExtendWordFilter(TokenStream in, List<String> extendWords) {
        super(in);
        this.extendWords = extendWords;
    }

    @Override
    public final boolean incrementToken() throws IOException {
        int skippedPositions = 0;
        while (input.incrementToken()) {
            if (containsExtendWord()) {
                if (skippedPositions != 0) {
                    // Fold the positions of dropped tokens into this token's increment
                    // so phrase/position queries still see the gap.
                    posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skippedPositions);
                }
                return true;
            }
            skippedPositions += posIncrAtt.getPositionIncrement();
        }
        return false;
    }

    @Override
    public void reset() throws IOException {
        super.reset();
        // TokenStream contract: clear per-stream state so a reused filter
        // does not carry match progress from a previous document.
        hadMatchedWordLength = 0;
    }

    /**
     * Returns true (keeping the token) if the current term is a substring of an
     * extend word; on a complete match the term is replaced by the whole word.
     */
    protected boolean containsExtendWord() {
        Optional<String> matchedWordOptional = extendWords.stream()
                .filter(word -> word.contains(termAtt.toString()))
                .findFirst();
        if (matchedWordOptional.isPresent()) {
            hadMatchedWordLength += termAtt.length();
            if (hadMatchedWordLength == matchedWordOptional.get().length()) {
                // BUG FIX: reset the accumulator after a complete match; previously it
                // stayed at the word's length, so later occurrences of the same extend
                // word could never match again.
                hadMatchedWordLength = 0;
                termAtt.setEmpty();
                termAtt.append(matchedWordOptional.get());
                return true;
            }
        } else {
            hadMatchedWordLength = 0;
        }
        return matchedWordOptional.isPresent();
    }
}