package com.sise.service;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.IOException;
import java.io.StringReader;

/**
 * Sensitive-word filtering service backed by Lucene analyzers.
 *
 * @author idea
 * @date 2019/6/8
 */
public class SensitiveWordServiceImpl implements SensitiveWordService {

    /**
     * Tokenizes the given content with the IK analyzer and prints the
     * resulting tokens to stdout, separated by {@code |}.
     *
     * <p>Note: the IK analyzer is preferred here because the standard
     * analyzer splits Chinese text into single characters, which gives
     * poor matches for sensitive-word detection.
     *
     * @param wordContent the text to tokenize; must not be {@code null}
     * @throws IOException if the token stream cannot be consumed
     */
    @Override
    public void sensitveWordFilter(String wordContent) throws IOException {
        // try-with-resources guarantees the analyzer is closed even if
        // tokenization throws (the original leaked it on error paths and
        // also created an unused, never-closed StandardAnalyzer).
        try (Analyzer ikAnalyzer = new IKAnalyzer()) {
            String result = printAnalyzerDoc(ikAnalyzer, wordContent);
            System.out.println(result);
        }
    }

    public static void main(String[] args) throws IOException {
        SensitiveWordServiceImpl sensitiveWordService = new SensitiveWordServiceImpl();
        sensitiveWordService.sensitveWordFilter("色情,服务");
    }

    /**
     * Consumes the analyzer's token stream for {@code text} and joins the
     * tokens with a trailing {@code |} after each one.
     *
     * <p>The caller owns the analyzer's lifecycle; this method only closes
     * the {@link TokenStream} it creates.
     *
     * @param analyzer the analyzer used to tokenize the text
     * @param text     the text to tokenize
     * @return the tokens joined as {@code token1|token2|...|}
     * @throws IOException if the token stream cannot be consumed
     */
    private String printAnalyzerDoc(Analyzer analyzer, String text) throws IOException {
        StringBuilder result = new StringBuilder();
        // Standard Lucene consumption workflow: addAttribute -> reset ->
        // incrementToken loop -> end; close is handled by try-with-resources.
        try (TokenStream tokenStream = analyzer.tokenStream("filterWordObj", new StringReader(text))) {
            CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                result.append(charTermAttribute.toString()).append('|');
            }
            tokenStream.end();
        }
        return result.toString();
    }

}
