// Character filter: html_strip
POST _analyze
{
    "tokenizer": "keyword", // "keyword" emits the whole input as a single unmodified token
    "char_filter": ["html_strip"], // strips HTML tags from the text before tokenization
    "text": "<b>Hello world</b>"
}

// Character filter: mapping (replace "-" with "_")
POST _analyze
{
    "tokenizer": "keyword", // "keyword" emits the whole input as a single unmodified token
    "char_filter": [
        {"type": "mapping", "mappings": ["- => _"]}
    ], 
    "text": "123-567"
}

// Character filter: mapping (replace emoticons with words)
POST _analyze
{
    "tokenizer": "keyword", // "keyword" emits the whole input as a single unmodified token
    "char_filter": [
        {"type": "mapping", "mappings": [":) => happy", ":( => sad"]}
    ], 
    "text": ["I am felling :)", "felling :( today"]
}

// Tokenizer: path_hierarchy
// Emits the path prefixes: /usr, /usr/local, /usr/local/elasticsearch, /usr/local/elasticsearch/config
POST _analyze
{
    "tokenizer": "path_hierarchy",
    "text": "/usr/local/elasticsearch/config"
}

// Tokenizer: whitespace; token filter: stop
POST _analyze
{
    "tokenizer": "whitespace", // split on whitespace only
    "filter": ["stop"], // removes stopwords such as "is", "on", "the"; capitalized "The" is NOT removed because stopword matching is case-sensitive
    "text": "The rain is Spain falls mainly on the plain"
}

// Tokenizer: whitespace; token filters: lowercase, stop
POST _analyze
{
    "tokenizer": "whitespace", // split on whitespace only
    "filter": ["lowercase", "stop"], // "The" is lowercased to "the" first, so the stop filter then removes it as well
    "text": "The rain is Spain falls mainly on the plain"
}

// Analyzer: stop analyzer
GET _analyze
{
  "analyzer": "stop", // removes stopwords such as "the", "a", "is"
  "text": "2 running Quick brown-foxes leap over lazy dogs in the summer evening."
}

// Analyzer: english analyzer
GET _analyze
{
  "analyzer": "english", // stems words, e.g. foxes -> fox, running -> run
  "text": "2 running Quick brown-foxes leap over lazy dogs in the summer evening."
}

// Analyzer: ik_smart (IK Chinese-analysis plugin)
GET _analyze
{
  "analyzer": "ik_smart", // coarsest-grained split -> 中华人民共和国 (kept as one token)
  "text": "中华人民共和国"
}

// Analyzer: ik_max_word (IK Chinese-analysis plugin)
GET _analyze
{
  "analyzer": "ik_max_word", // finest-grained split -> 中华人民共和国, 中华人民, 中华, 华人, 人民共和国, 人民, 共和国, 共和, 国
  "text": "中华人民共和国"
}

// Analyzer: custom dictionary word
// Configure a local or remote IK dictionary; here a remote dictionary is used:
// remote_ext_dict = http://es-update/ext.dic, with the line "二狗" added to it.
// NOTE: the original snippet was missing the request line, so it was not runnable.
GET _analyze
{
  "analyzer": "ik_smart",
  "text": "二狗"
}


// Analysis chain: exactly 1 tokenizer, optional token filters, optional character filters
GET _analyze
{
  "tokenizer": "standard",
  "filter": ["lowercase"],
  "text": "Are You OK?"
}


// Analyze using a custom analyzer defined on an index.
// Drop any previous copy of the index first.
// (HTTP method uppercased for consistency with the other requests in this file.)
DELETE my_index

PUT my_index
{
  "settings": {
    "analysis": {
      // Map emoticons to placeholder words before tokenization
      "char_filter": {"emoticons": {"type": "mapping", "mappings": [":)=>_happy_", ":(=>_sad_"]}},
      // Split on spaces and the punctuation characters . , ! ?
      "tokenizer": {"punctuation": {"type": "pattern", "pattern": "[ .,!?]"}},
      // FIX: a literal stopword list must be an array; a bare string ("aaa")
      // would be interpreted as the name of a predefined set (e.g. "_english_")
      "filter": {"english_stop": {"type": "stop", "stopwords": ["aaa"]}},
      // Chain: emoticon mapping -> punctuation tokenizer -> lowercase -> custom stop
      "analyzer": {
        "my_analyzer": {
          "type": "custom",
          "char_filter": ["emoticons"],
          "tokenizer": "punctuation",
          "filter": ["lowercase", "english_stop"]
        }
      }
    }
  }
}

// Exercise the custom analyzer defined on my_index:
// ":)" is mapped to "_happy_", tokens are lowercased, and "aaa" is removed as a stopword
POST my_index/_analyze
{
    "analyzer": "my_analyzer",
    "text": "I'm  a :) person, and u aaa ?"
}

// Remote synonym dictionary: requires the dynamic-synonym plugin to be installed
PUT /log_nginx
{
  "settings": {
    "analysis": {
        "analyzer": {
          // standard tokenizer + trim token filter
          "keyword_trim": {
            "tokenizer": "standard",
            "filter": [ "trim" ]
          },
          // IK tokenization + remotely loaded synonyms
          "ik_synonym": {
            "tokenizer": "ik_smart",
            "filter": ["remote_synonym"]
          }
        },
        "filter": {
          "remote_synonym" : { // filter type provided by the dynamic-synonym plugin
            "type" : "dynamic_synonym",
            "synonyms_path" : "http://es-update/synonym.txt", // remote synonym file, reloaded periodically
            "interval": 30 // reload interval; presumably seconds — confirm against the plugin docs
          }
        }
      }
  }
}