package io.renren.modules.avanotes.service.impl;

import io.renren.common.vo.TokenVo;
import io.renren.modules.avanotes.service.TokenAnalyzerService;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

/**
 * @author: xiaomi
 * @date: 2022/2/16
 * @description: 分词功能工具的抽象类
 */
public abstract class AbsIKTokenAnalyzer implements TokenAnalyzerService {

    /** Shared IK analyzer instance; expected to be populated by {@link #initAnalyzer()}. */
    protected IKAnalyzer analyzer;

    /** Initializes {@link #analyzer}; implemented by concrete subclasses. */
    abstract void initAnalyzer();

    /**
     * Segments {@code content} with the IK analyzer and returns one {@link TokenVo}
     * per token, carrying the token text and its begin/end character offsets.
     *
     * @param content the text to tokenize
     * @return tokens in stream order; empty list when the content yields no tokens
     * @throws IOException if consuming the token stream fails
     */
    @Override
    public List<TokenVo> token(String content) throws IOException {
        List<TokenVo> list = new ArrayList<>();

        StringReader reader = new StringReader(content);
        TokenStream tokenStream = analyzer.reusableTokenStream("", reader);
        // addAttribute returns the single per-stream attribute instance, so hold
        // the references once instead of calling getAttribute on every iteration.
        CharTermAttribute termAttr = tokenStream.addAttribute(CharTermAttribute.class);
        OffsetAttribute offsetAttr = tokenStream.addAttribute(OffsetAttribute.class);
        try {
            // Lucene consumer contract: reset() before the first incrementToken(),
            // end() once the stream is exhausted.
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                TokenVo vo = new TokenVo();
                vo.setWord(termAttr.toString());
                vo.setBeginIndex(offsetAttr.startOffset());
                vo.setEndIndex(offsetAttr.endOffset());
                list.add(vo);
            }
            tokenStream.end();
        } finally {
            // Close only the stream and reader — NOT the analyzer: it is a reusable
            // field shared across calls, and closing it here caused exceptions on reuse.
            tokenStream.close();
            reader.close();
        }

        return list;
    }
}
