package com.tingfeng.smartcn;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.IOException;

/**
 * Demo comparing tokenization of English and Chinese text by Lucene's
 * StandardAnalyzer and the smartcn SmartChineseAnalyzer.
 */
public class ChineseAnalyzerDemo {

    /**
     * Consumes a token stream following the Lucene workflow
     * (reset → incrementToken → end → close) and prints each term
     * followed by a "|" separator, then a trailing newline.
     *
     * @param ts the token stream to consume; always closed on return,
     *           even if iteration fails
     * @throws IOException if the underlying stream fails
     */
    private static void doToken(TokenStream ts) throws IOException {
        try {
            ts.reset();
            CharTermAttribute cta = ts.getAttribute(CharTermAttribute.class);
            while (ts.incrementToken()) {
                System.out.print(cta.toString() + "|");
            }
            System.out.println();
            ts.end();
        } finally {
            // Close even on failure so the analyzer's reused components
            // are released and can be reset for the next tokenStream() call.
            ts.close();
        }
    }

    public static void main(String[] args) {
        String etext = "Analysis is one of the main causes of slow indexing. Simply put, the more you analyze the slower analyze the indexing (in most cases).";
        String chineseText = "中华人民共和国简称中国。";

        // Standard analyzer from lucene-core; try-with-resources closes it
        // (Analyzer implements Closeable — the original leaked it).
        try (Analyzer ana = new StandardAnalyzer()) {
            TokenStream ts = ana.tokenStream("content", etext);
            System.out.println("标准分词器，英文分词效果：");
            doToken(ts);
            ts = ana.tokenStream("content", chineseText);
            System.out.println("标准分词器，中文分词效果：");
            doToken(ts);
        } catch (IOException e) {
            e.printStackTrace();
        }

        System.out.println("\n--------- Smart 中文分词 ------------\n");

        // smartcn Chinese analyzer; catch narrowed from Exception to the
        // IOException that doToken actually declares.
        try (Analyzer smart = new SmartChineseAnalyzer()) {
            TokenStream ts = smart.tokenStream("content", etext);
            System.out.println("Smart中文分词器，英文分词效果：");
            doToken(ts);
            ts = smart.tokenStream("content", chineseText);
            System.out.println("Smart中文分词器，中文分词效果：");
            doToken(ts);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
