/**
 * IK 中文分词  版本 5.0
 * IK Analyzer release 5.0
 * <p>
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * <p>
 * 源代码由林良益(linliangyi2005@gmail.com)提供
 * 版权声明 2012，乌龙茶工作室
 * provided by Linliangyi and copyright 2012 by Oolong studio
 */
package org.wltea.analyzer.lucene;

import com.ycy.analyzer.PrintUtil;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;

/**
 * IK分词器，Lucene Analyzer接口实现
 * 兼容Lucene 3.1以上版本
 */
/**
 * IK analyzer — an implementation of the Lucene {@link Analyzer} interface.
 * Compatible with Lucene 3.1 and above (uses the single-argument
 * {@code createComponents(String)} introduced in newer Lucene versions).
 */
public class IKAnalyzer6x extends Analyzer {

  // When true, the tokenizer performs smart (coarse-grained) segmentation;
  // when false, fine-grained segmentation.
  private boolean useSmart;

  /** Returns whether smart segmentation is enabled. */
  public boolean useSmart() {
    return useSmart;
  }

  /** Enables or disables smart segmentation. */
  public void setUseSmart(boolean useSmart) {
    this.useSmart = useSmart;
  }

  /** Creates an analyzer using the default fine-grained segmentation algorithm. */
  public IKAnalyzer6x() {
    this(false);
  }

  /**
   * Creates an analyzer.
   *
   * @param useSmart when {@code true} the tokenizer performs smart segmentation;
   *                 when {@code false} it performs fine-grained segmentation
   */
  public IKAnalyzer6x(boolean useSmart) {
    super();
    this.useSmart = useSmart;
  }

  /**
   * Builds the token stream components for the given field, backed by an
   * {@link IKTokenizer6x} configured with this analyzer's smart-mode flag.
   */
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer ikTokenizer = new IKTokenizer6x(this.useSmart());
    return new TokenStreamComponents(ikTokenizer);
  }

  /** Demo entry point: prints the tokenization of a few sample strings. */
  public static void main(String[] args) {
    test1("美国马里兰州", false);
    test1("殷才永密室逃脱nook诺克密室逃脱真人密室跳脱", false);
  }

  /**
   * Tokenizes {@code text} with a fresh analyzer instance and prints all tokens.
   *
   * <p>Takes a primitive {@code boolean} (rather than boxed {@code Boolean})
   * to avoid accidental autoboxing and a potential NPE from a {@code null}
   * argument; existing callers passing literals or boxed values still compile.
   *
   * @param text     the text to analyze
   * @param useSmart whether to use smart segmentation
   */
  public static void test1(String text, boolean useSmart) {
    IKAnalyzer6x analyzer = new IKAnalyzer6x(useSmart);
    PrintUtil.displayTokenAll(text, analyzer);
  }

}
