package com.alen.lucene.demo.helloworld;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.util.HashMap;
import java.util.Map;

/**
 * Demo that runs a Lucene {@link Analyzer} over sample English and Chinese
 * text and prints the individual tokens it produces.
 */
public class AnalyzerTest {

    static String en = "good morning boy";
    static String ch = "你好 恭喜发财 东方明珠三生三世十里桃花";

    /**
     * Tokenizes {@code content} with the given analyzer and prints one token
     * per line.
     *
     * @param analyzer the analyzer whose tokenization is being demonstrated
     * @param content  the raw text to tokenize
     * @throws Exception if the token stream cannot be consumed
     */
    public static void analyzerMethod(Analyzer analyzer, String content) throws Exception {
        // try-with-resources guarantees close(); without end()+close() the
        // analyzer cannot be reused (Lucene throws IllegalStateException).
        try (TokenStream tokenStream = analyzer.tokenStream("content", content)) {
            // The term attribute must be obtained BEFORE reset(); printing the
            // TokenStream object itself dumps attribute internals, not the token.
            CharTermAttribute termAttr = tokenStream.addAttribute(CharTermAttribute.class);
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                System.out.println(termAttr.toString());
            }
            // Signal end-of-stream per the TokenStream workflow contract.
            tokenStream.end();
        }
    }

    // Tokenize the Chinese sample with the project's IK analyzer.
    // (Previously mislabeled as a SimpleAnalyzer test.)
    public static void testIkAnalyzer() throws Exception {
        analyzerMethod(new MyIKAnalyzer(), ch);
    }

    /**
     * Kept for backward compatibility with any existing callers; delegates to
     * {@link #testIkAnalyzer()}.
     */
    public static void testSimpleAnalyzer() throws Exception {
        testIkAnalyzer();
    }

    public static void main(String[] args) throws Exception {
//        Map<String, Analyzer> analyzerMap = new HashMap<>();
//        analyzerMap.put("en", new SimpleAnalyzer());   // use SimpleAnalyzer for "en"
//        analyzerMap.put("ch", new StandardAnalyzer()); // use StandardAnalyzer for "ch"
//        // Set the default analyzer; PerFieldAnalyzerWrapper picks the analyzer
//        // registered for the field name passed to tokenStream(field, text),
//        // falling back to the default when the field has no mapping.
//        PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(), analyzerMap);
//        TokenStream tokenStream = wrapper.tokenStream("content", ch);
//        tokenStream.reset();
//        while (tokenStream.incrementToken()) {
//            System.out.println(tokenStream);
//        }
        testSimpleAnalyzer();
    }

}
