#主要包括ICU、IK、PinYin分词器，PinYin分词器可以和其它分词器一起使用
1、ICU分词插件
    #wget https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-6.1.1.zip
    sudo bin/elasticsearch-plugin install [url]
    sudo bin/elasticsearch-plugin install file:///path/to/plugin.zip   #本地安装，一定要加'file://'

    #新建基于ICU的分析器
    PUT icu_sample
    {
      "settings": {
        "index": {
          "analysis": {
            "analyzer": {
              "my_icu_analyzer": {
                "tokenizer": "icu_tokenizer"
              }
            }
          }
        }
      }
    }

    #使用分析器测试分词
    POST icu_sample/_analyze
    {
      "analyzer": "my_icu_analyzer",
      "text": "สวัสดี ผมมาจากกรุงเทพฯ"
    }

    #自定义ICU分析器，添加HTML过滤、符号转换（&=>and）以及停用词
    PUT /icu_sample
    {
        "settings": {
            "analysis": {
                "char_filter": {
                    "&_to_and": {
                        "type":       "mapping",
                        "mappings": [ "&=> and "]
                }},
                "filter": {
                    "my_stopwords": {
                        "type":       "stop",
                        "stopwords": [ "the", "a" ]
                }},
                "analyzer": {
                    "my_icu_analyzer": {
                        "type":         "custom",
                        "char_filter":  [ "html_strip", "&_to_and","icu_normalizer"],
                        "tokenizer":    "icu_tokenizer",
                        "filter":       [ "lowercase", "my_stopwords" ]
                }}
    }}}

2、IK分词器
    #URL安装（install 命令和下载地址必须写在同一条命令里）
    ./bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v6.1.1/elasticsearch-analysis-ik-6.1.1.zip
    #本地安装，和上面一样

    #简单使用
    PUT /ik_sample
    {
        "settings": {
            "analysis": {
                "char_filter": {
                    "&_to_and": {
                        "type":       "mapping",
                        "mappings": [ "&=> and "]
                }},
                "filter": {
                    "my_stopwords": {
                        "type":       "stop",
                        "stopwords": [ "the", "a" ]
                }},
                "analyzer": {
                    "my_ik_analyzer": {
                        "type":         "custom",
                        "char_filter":  [ "html_strip", "&_to_and"],
                        "tokenizer":    "ik_max_word",
                        "filter":       [ "lowercase", "my_stopwords" ]
                }}
    }}}

    #ik_max_word 和 ik_smart 什么区别
    ik_max_word: 会将文本做最细粒度的拆分，
    比如会将“中华人民共和国国歌”拆分为“中华人民共和国,中华人民,中华,华人,人民共和国,人民,人,民,共和国,共和,和,国国,国歌”，
    会穷尽各种可能的组合
    ik_smart: 会做最粗粒度的拆分，比如会将“中华人民共和国国歌”拆分为“中华人民共和国,国歌”

    #测试分词效果
    POST /ik_sample/_analyze
    {
      "analyzer": "my_ik_analyzer",
      "text": "自定义IK分词器，添加过滤html,符合转换以及停用词"
    }

    #使用原生的分析器
    POST /_analyze
    {
      "analyzer": "ik_smart",//ik_max_word
      "text": "信息与电子工程学院"
    }

    #使用自定义的IK分析器
    POST /ik_sample/fulltext/_mapping
    {
        "properties": {
            "content": {
                "type": "text",
                "analyzer": "ik_max_word",
                "search_analyzer": "ik_max_word"
            }
        }
    }

    #定义停用词和自定义分词
    #目前该插件支持热更新 IK 分词，使用远程链接动态更新词汇
    #IKAnalyzer.cfg.xml can be located at {conf}/analysis-ik/config/IKAnalyzer.cfg.xml#默认路径
    #{plugins}/elasticsearch-analysis-ik-*/config/IKAnalyzer.cfg.xml
        <?xml version="1.0" encoding="UTF-8"?>
        <!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
        <properties>
            <comment>IK Analyzer 扩展配置</comment>
            <!--用户可以在这里配置自己的扩展字典 -->
            <entry key="ext_dict">custom/mydict.dic;custom/single_word_low_freq.dic</entry>
             <!--用户可以在这里配置自己的扩展停止词字典-->
            <entry key="ext_stopwords">custom/ext_stopword.dic</entry>
            <!--用户可以在这里配置远程扩展字典 -->
            <entry key="remote_ext_dict">location</entry>
            <!--用户可以在这里配置远程扩展停止词字典-->
            <entry key="remote_ext_stopwords">http://xxx.com/xxx.dic</entry>
        </properties>

    #解决远程字典报错----java.net.SocketPermission 127.0.0.1:2005 connect,resolve
    vi elasticsearch/config/MyPolicy.policy
        grant {
        permission java.net.SocketPermission "*:*","accept,connect,resolve";
        };

    vi elasticsearch/config/jvm.options
        -Djava.security.policy=/home/doobo/elasticsearch/config/MyPolicy.policy

    #远程字典一定得是UTF-8编码，可以下载下面的文件，已经通过验证
    https://gitee.com/doobo/notes/raw/master/src/main/resources/ext.dic
    https://gitee.com/doobo/notes/raw/master/src/main/resources/stopword.dic

3、Pinyin分词器
    #下载地址，Download Url
    https://github.com/medcl/elasticsearch-analysis-pinyin/releases/download/v6.1.1/elasticsearch-analysis-pinyin-6.1.1.zip

    #安装，同上
    #使用方式
    POST /_analyze
    {
      "analyzer": "pinyin",
      "text": "刘德华"
    }

    #使用PinYin过滤器结合其他分词器一起使用
    PUT /my_index
    {
        "index" : {
            "analysis" : {
                "analyzer" : {
                    "user_name_analyzer" : {
                        "tokenizer" : "ik_smart",
                        "filter" : "pinyin_first_letter_and_full_pinyin_filter"
                    }
                },
                "filter" : {
                    "pinyin_first_letter_and_full_pinyin_filter" : {
                        "type" : "pinyin",
                        "keep_first_letter" : true,
                        "keep_full_pinyin" : false,
                        "keep_none_chinese" : true,
                        "keep_original" : true,//保留原始的分词结果，否则只保留拼音分词结果
                        "limit_first_letter_length" : 16,
                        "lowercase" : true,
                        "trim_whitespace" : true,
                        "keep_none_chinese_in_first_letter" : true
                    }
                }
            }
        }
    }

    #测试数据
    POST /my_index/_analyze
    {
      "analyzer": "user_name_analyzer",
      "text": "刘德华 张学友 郭富城 黎明 四大天王"
    }




