package org.scorpios.search.analyzer;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.scorpios.search.jieba.JiebaTokenizer;
import org.scorpios.search.jieba.Token;

import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

/**
 * @author zhangxudong
 * @date 2022/11/12 20:20
 * @desc
 */
/**
 * Lucene {@link Tokenizer} backed by the jieba Chinese segmenter.
 *
 * <p>On {@link #reset()} the entire input is read line by line, segmented via
 * {@link JiebaTokenizer#tokenize}, and buffered; {@link #incrementToken()} then
 * replays the buffered tokens one at a time. Because jieba is invoked per line,
 * token offsets are line-relative; this class re-bases them against the line's
 * position within the whole input so offsets stay monotonically increasing
 * (Lucene rejects offsets that go backwards).
 *
 * <p>Not thread-safe, matching the general Lucene TokenStream contract.
 */
public class SearchTokenizer extends Tokenizer {
    private static final Logger LOG = LogManager.getLogger(SearchTokenizer.class);

    /** Buffered tokens for the current input, in ascending start-offset order. */
    private List<Token> tokenBuffer;
    /**
     * Start offset of each token's source line within the whole input;
     * parallel to {@link #tokenBuffer}. Needed because jieba reports offsets
     * relative to the single line it was given, not the full stream.
     */
    private List<Integer> tokenLineBases;
    private BufferedReader bufferedReader;
    /** Index of the next token to emit from {@link #tokenBuffer}. */
    private int tokenIndex;
    /** Corrected end offset of the last emitted token, reported by {@link #end()}. */
    private int finalOffset;
    private final JiebaTokenizer scanner;
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
    private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);

    public SearchTokenizer() {
        // Initialize the jieba segmenter once; it is reused across reset() calls.
        this.scanner = new JiebaTokenizer();
    }

    /**
     * Emits the next buffered token, or returns {@code false} when the buffer
     * is exhausted (or {@link #reset()} was never called).
     */
    @Override
    public final boolean incrementToken() throws IOException {
        clearAttributes();
        if (tokenBuffer == null || tokenIndex >= tokenBuffer.size()) {
            return false;
        }

        Token token = tokenBuffer.get(tokenIndex);
        // Re-base the line-relative jieba offsets against the line's position
        // in the whole input, then map through correctOffset() as Lucene requires.
        int base = tokenLineBases.get(tokenIndex);
        int startOffset = correctOffset(base + token.startPos);
        int endOffset = correctOffset(base + token.endPos);

        termAtt.append(token.value);
        offsetAtt.setOffset(startOffset, endOffset);
        posIncrAtt.setPositionIncrement(1);

        finalOffset = endOffset;
        tokenIndex++;
        return true;
    }

    /**
     * Reads the whole input, segments it line by line, and rebuilds the token
     * buffer from scratch. Rebuilding (rather than appending) is essential:
     * Lucene reuses Tokenizer instances across documents, and the previous
     * version of this method accumulated tokens from earlier inputs.
     */
    @Override
    public void reset() throws IOException {
        super.reset();
        bufferedReader = (input instanceof BufferedReader)
                ? (BufferedReader) input
                : new BufferedReader(input);

        tokenBuffer = new ArrayList<>();
        tokenLineBases = new ArrayList<>();
        tokenIndex = 0;
        finalOffset = 0;

        int lineBase = 0; // offset of the current line within the whole input
        String line;
        while ((line = bufferedReader.readLine()) != null) {
            List<Token> tokens = scanner.tokenize(line, true, true);
            if (tokens != null && !tokens.isEmpty()) {
                // Copy before sorting in case tokenize() returns an unmodifiable list.
                // Must be sorted by startPos ascending, otherwise indexing fails
                // with an offsets-went-backwards error.
                List<Token> sorted = new ArrayList<>(tokens);
                sorted.sort((a, b) -> Integer.compare(a.startPos, b.startPos));
                for (Token t : sorted) {
                    tokenBuffer.add(t);
                    tokenLineBases.add(lineBase);
                }
            }
            // +1 for the line terminator consumed by readLine().
            // NOTE(review): assumes a one-character separator; "\r\n" input drifts
            // by one position per line — confirm against the expected corpus.
            lineBase += line.length() + 1;
        }
    }

    /**
     * Reports the final offset. Per the Lucene TokenStream contract this is the
     * corrected end offset of the last token itself — the previous {@code +1}
     * overshot it.
     */
    @Override
    public void end() throws IOException {
        super.end();
        offsetAtt.setOffset(finalOffset, finalOffset);
    }

    /** Closes the underlying reader; safe to call before the first reset(). */
    @Override
    public void close() throws IOException {
        super.close();
        if (bufferedReader != null) {
            bufferedReader.close();
            bufferedReader = null;
        }
        // Guard: close() may be invoked before reset() ever ran, in which case
        // the buffers are still null (the previous version NPE'd here).
        if (tokenBuffer != null) {
            tokenBuffer.clear();
            tokenLineBases.clear();
        }
    }
}
