package com.dubito.lucene.analyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.io.StringReader;

/**
 * @author liliang
 * @description:
 * @date 2018/4/28.
 */
/**
 * Demo driver comparing three Lucene 4.x analyzers on the same mixed
 * English/Chinese text: SmartChineseAnalyzer (dictionary-based word
 * segmentation), CJKAnalyzer (bigram segmentation) and StandardAnalyzer
 * (single-character segmentation for CJK). Each method prints the default
 * stop-word set, builds a custom stop-word set on top of it, then tokenizes
 * {@link #TEXT} and prints every surviving token to stdout.
 */
public class AnalyzerTest {

    /** Sample text: English stop-word candidates followed by a Chinese sentence. */
    private static final String TEXT = "but and or that 天天要吃饭上班,买车,买房";

    public static void main(String[] args) throws IOException {
        chineseAnalyzerTest();
    }

    /**
     * Dictionary-based segmentation via {@link SmartChineseAnalyzer}.
     *
     * @throws IOException if tokenization fails
     */
    private static void chineseAnalyzerTest() throws IOException {
        // Inspect the analyzer's built-in stop words.
        CharArraySet defaultStopWords = SmartChineseAnalyzer.getDefaultStopSet();
        System.out.println(defaultStopWords);
        // Custom stop words (case-insensitive), merged with the defaults.
        CharArraySet myStopWords = new CharArraySet(Version.LUCENE_40, 0, true);
        myStopWords.add("but");
        myStopWords.addAll(defaultStopWords);
        Analyzer analyzer = new SmartChineseAnalyzer(Version.LUCENE_40, myStopWords);
        printTokens(analyzer);
    }

    /**
     * Bigram (two-character sliding window) segmentation via {@link CJKAnalyzer}.
     *
     * @throws IOException if tokenization fails
     */
    private static void cjkAnalyzerTest() throws IOException {
        // Inspect the analyzer's built-in stop words.
        CharArraySet defaultStopWords = CJKAnalyzer.getDefaultStopSet();
        System.out.println(defaultStopWords);
        // Custom stop words (case-insensitive), merged with the defaults.
        CharArraySet myStopWords = new CharArraySet(Version.LUCENE_40, 0, true);
        myStopWords.add("天天");
        myStopWords.addAll(defaultStopWords);
        Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_40, myStopWords);
        printTokens(analyzer);
    }

    /**
     * Single-character (for CJK text) segmentation via {@link StandardAnalyzer}.
     *
     * @throws IOException if tokenization fails
     */
    private static void standardAnalyzerTest() throws IOException {
        // Inspect the analyzer's built-in stop words.
        CharArraySet defaultStopWords = StandardAnalyzer.STOP_WORDS_SET;
        System.out.println(defaultStopWords);
        // Custom stop words (case-insensitive), merged with the defaults.
        CharArraySet myStopWords = new CharArraySet(Version.LUCENE_40, 0, true);
        myStopWords.add("天");
        myStopWords.addAll(defaultStopWords);
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40, myStopWords);
        printTokens(analyzer);
    }

    /**
     * Tokenizes {@link #TEXT} with the given analyzer and prints one token per line.
     *
     * <p>Follows the mandatory Lucene 4.x TokenStream workflow:
     * {@code reset()} before the first {@code incrementToken()}, {@code end()}
     * after the last, and {@code close()} always (via try-with-resources).
     * The original code skipped {@code reset()}, which throws at runtime in
     * Lucene 4.x, and leaked the stream on exceptions.
     *
     * @param analyzer the analyzer under test; never null
     * @throws IOException if tokenization fails
     */
    private static void printTokens(Analyzer analyzer) throws IOException {
        try (TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(TEXT))) {
            // Fetch the attribute once; it is updated in place on each increment.
            CharTermAttribute term = tokenStream.addAttribute(CharTermAttribute.class);
            tokenStream.reset(); // required before incrementToken() in Lucene 4.x
            while (tokenStream.incrementToken()) {
                System.out.println(term);
            }
            tokenStream.end(); // finalize offsets per the TokenStream contract
        }
    }

}
