"""
@author: 江同学呀
@file: token_filter.py
@date: 2024/11/24 20:12
@desc:
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-tokenfilters.html
    todo 增加参数的必填检测
"""
from typing import Union, List, Dict, Literal, Optional

from espc.orm.model.base.base import _Base
from espc.orm.model.scripting.script import Script


class TokenFilter(_Base):
    """
    Base class for Elasticsearch token filters.

    Subclasses set ``type`` to the filter's registered ES type string and
    extend ``_build`` to emit their filter-specific parameters.

    :param name: Name under which this filter is registered in the index
        analysis settings (referenced by analyzers).
    """
    type: str

    def __init__(self, name: str, **kwargs):
        super().__init__(**kwargs)
        # Registration name used by analyzers to reference this filter.
        self.name: str = name

    def _build(self) -> Dict:
        """Return the filter's parameter dict (empty at the base level)."""
        return {}


class ApostropheTokenFilter(TokenFilter):
    """
    Apostrophe token filter (``apostrophe``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-apostrophe-tokenfilter.html

    Strips all characters after an apostrophe, including the apostrophe
    itself.  Included in Elasticsearch's built-in Turkish language analyzer;
    backed by Lucene's ApostropheFilter, which was built for Turkish.

    This filter takes no parameters.
    """
    type: str = "apostrophe"

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        # No filter-specific parameters beyond the base body.
        return super()._build()


class AsciiFoldingTokenFilter(TokenFilter):
    """
    ASCII folding token filter (``asciifolding``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-asciifolding-tokenfilter.html

    Converts alphabetic, numeric, and symbolic characters outside the Basic
    Latin Unicode block (the first 127 ASCII characters) to their ASCII
    equivalent, if one exists — e.g. ``à`` becomes ``a``.  Backed by Lucene's
    ASCIIFoldingFilter.

    :param preserve_original:
        (Optional, Boolean) If True, emit both the original and the folded
        token.  ES default: False.
    """
    type: str = "asciifolding"

    def __init__(self, name: str, preserve_original: bool = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._preserve_original: Optional[bool] = preserve_original

    def _build(self) -> Dict:
        opts: Dict = super()._build()
        if self._preserve_original is not None:
            opts.update(preserve_original=self._preserve_original)
        return opts


class CJKBigramTokenFilter(TokenFilter):
    """
    CJK bigram token filter (``cjk_bigram``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-cjk-bigram-tokenfilter.html

    Forms bigrams out of CJK (Chinese, Japanese, and Korean) tokens.
    Included in Elasticsearch's built-in CJK language analyzer; backed by
    Lucene's CJKBigramFilter.

    :param ignored_scripts:
        (Optional, array of character scripts) Character scripts for which
        bigrams are disabled.  Possible values: ``han``, ``hangul``,
        ``hiragana``, ``katakana``.  All non-CJK input passes through
        unmodified.
    :param output_unigrams:
        (Optional, Boolean) If True, emit tokens in both bigram and unigram
        form.  If False, a CJK character is output in unigram form only when
        it has no adjacent characters.  ES default: False.
    """
    type: str = "cjk_bigram"

    def __init__(self, name: str, ignored_scripts: List[Literal["han", "hangul", "hiragana", "katakana"]] = None,
                 output_unigrams: bool = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._ignored_scripts: Optional[List[Literal["han", "hangul", "hiragana", "katakana"]]] = ignored_scripts
        self._output_unigrams: Optional[bool] = output_unigrams

    def _build(self) -> Dict:
        opts: Dict = super()._build()
        if self._ignored_scripts:
            opts.update(ignored_scripts=self._ignored_scripts)
        if self._output_unigrams is not None:
            opts.update(output_unigrams=self._output_unigrams)
        return opts


class CJKWidthTokenFilter(TokenFilter):
    """
    CJK width token filter (``cjk_width``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-cjk-width-tokenfilter.html

    Normalizes width differences in CJK (Chinese, Japanese, and Korean)
    characters:
      - folds full-width ASCII character variants into the equivalent basic
        Latin characters;
      - folds half-width Katakana character variants into the equivalent
        Kana characters.
    Included in Elasticsearch's built-in CJK language analyzer; backed by
    Lucene's CJKWidthFilter.

    This filter takes no parameters.
    """
    type: str = "cjk_width"

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        # No filter-specific parameters beyond the base body.
        return super()._build()


class ClassicTokenFilter(TokenFilter):
    """
    Classic token filter (``classic``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-classic-tokenfilter.html

    Performs optional post-processing of terms generated by the classic
    tokenizer: removes the English possessive ('s) from the end of words and
    removes dots from acronyms.  Backed by Lucene's ClassicFilter.

    This filter takes no parameters.  (The original docstring listed
    ``common_words``/``common_words_path``/``ignore_case``/``query_mode``,
    which belong to the ``common_grams`` filter, not this one.)
    """
    type: str = "classic"

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)
        return

    def _build(self) -> Dict:
        # No filter-specific parameters beyond the base body.
        return super()._build()


class CommonGramsTokenFilter(TokenFilter):
    """
    Common grams token filter (``common_grams``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-common-grams-tokenfilter.html

    Generates bigrams for a specified set of common words.  For example,
    with ``is`` and ``the`` as common words, the tokens
    [the, quick, fox, is, brown] become
    [the, the_quick, quick, fox, fox_is, is, is_brown, brown].  Can be used
    in place of the ``stop`` filter when common words should not be ignored
    entirely.  Backed by Lucene's CommonGramsFilter.

    :param common_words:
        (Required*, array of strings) Tokens for which bigrams are
        generated.  Either this or ``common_words_path`` is required.
    :param common_words_path:
        (Required*, string) Path to a UTF-8 encoded file containing one
        token per line, absolute or relative to the config location.  Either
        this or ``common_words`` is required.
    :param ignore_case:
        (Optional, Boolean) If True, common-word matching is
        case-insensitive.  ES default: False.
    :param query_mode:
        (Optional, Boolean) If True, exclude unigrams for common words and
        for terms followed by common words from the output.  Recommended for
        search analyzers.  ES default: False.
    """
    type: str = "common_grams"

    def __init__(
            self, name: str, common_words: Union[str, List[str]] = None, common_words_path: str = None,
            ignore_case: bool = None, query_mode: bool = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._common_words: Union[str, List[str]] = common_words
        self._common_words_path: Optional[str] = common_words_path
        self._ignore_case: Optional[bool] = ignore_case
        self._query_mode: Optional[bool] = query_mode

    def _build(self) -> Dict:
        opts: Dict = super()._build()
        if self._common_words:
            opts.update(common_words=self._common_words)
        if self._common_words_path:
            opts.update(common_words_path=self._common_words_path)
        if self._ignore_case is not None:
            opts.update(ignore_case=self._ignore_case)
        if self._query_mode is not None:
            opts.update(query_mode=self._query_mode)
        return opts


class ConditionalTokenFilter(TokenFilter):
    """
    Conditional token filter (``condition``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-condition-tokenfilter.html

    Applies a set of token filters to tokens that match conditions in a
    provided predicate script.  Backed by Lucene's ConditionalTokenFilter.

    :param filter_:
        (Required, array of token filters) Filters applied, in order, to
        tokens that match the predicate script.  May include custom token
        filters defined in the index mapping, given either as name strings
        or as ``TokenFilter`` instances.
    :param script:
        (Required, script object) Predicate script used to decide whether
        the filters apply to a token.  Only inline scripts are supported;
        Painless scripts run in the analysis predicate context and require a
        ``token`` property.  A plain string is treated as an inline script
        source.
    """
    type: str = "condition"

    def __init__(
            self, name: str, filter_: List[Union[str, TokenFilter]] = None, script: Union[str, Dict, Script] = None,
            **kwargs
    ):
        super().__init__(name=name, **kwargs)
        # Annotation fix: the list may hold names or TokenFilter instances.
        self._filter: List[Union[str, TokenFilter]] = filter_
        self._script: Union[str, Dict, Script] = script
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._filter:
            # BUG FIX: ES expects the registered *names* of the referenced
            # filters here.  Using ``type`` would collapse a configured custom
            # filter to its generic type string (losing its configuration), so
            # TokenFilter instances are referenced by ``name`` instead.
            body["filter"] = [_f.name if isinstance(_f, TokenFilter) else _f for _f in self._filter]
        if self._script:
            if isinstance(self._script, Script):
                body["script"] = self._script._build()
            elif isinstance(self._script, str):
                # Bare string -> inline script source.
                body["script"] = {"source": self._script}
            else:
                body["script"] = self._script
        return body


class DecimalDigitTokenFilter(TokenFilter):
    """
    Decimal digit token filter (``decimal_digit``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-decimal-digit-tokenfilter.html

    Converts all digits in the Unicode ``Decimal_Number`` general category
    to 0-9; for example, the Bengali numeral ৩ becomes 3.  Backed by
    Lucene's DecimalDigitFilter.

    This filter takes no parameters.
    """
    type: str = "decimal_digit"

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        # No filter-specific parameters beyond the base body.
        return super()._build()


class DelimitedPayloadTokenFilter(TokenFilter):
    """
    Delimited payload token filter (``delimited_payload``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-delimited-payload-tokenfilter.html

    Separates a token stream into tokens and payloads based on a specified
    delimiter.  For example, with the default ``|`` delimiter,
    ``the|1 quick|2 fox|3`` is split into the tokens ``the``, ``quick`` and
    ``fox`` with payloads 1, 2 and 3.  Backed by Lucene's
    DelimitedPayloadTokenFilter.

    :param delimiter:
        (Optional, string) Character used to separate tokens from payloads.
        ES default: ``|``.
    :param encoding:
        (Optional, string) Data type of the stored payload.  Valid values:
            float    — (default) floating point
            identity — characters
            int      — integer
    """
    # BUG FIX: was mistakenly "decimal_digit" (copied from the previous
    # class); the ES filter type is "delimited_payload".
    type: str = "delimited_payload"

    def __init__(
            self, name: str, delimiter: str = None, encoding: Literal["float", "identity", "int"] = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._delimiter: Optional[str] = delimiter
        # Annotation fix: ES "encoding" is a single string, not a list.
        self._encoding: Optional[Literal["float", "identity", "int"]] = encoding
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._delimiter:
            body["delimiter"] = self._delimiter
        if self._encoding:
            body["encoding"] = self._encoding
        return body


class DictionaryDecompounderTokenFilter(TokenFilter):
    """
    Dictionary decompounder token filter (``dictionary_decompounder``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-dict-decomp-tokenfilter.html

    Uses a specified list of words and a brute force approach to find
    subwords in compound words; found subwords are included in the token
    output.  Backed by Lucene's DictionaryCompoundWordTokenFilter, which was
    built for Germanic languages.

    :param word_list:
        (Required*, array of strings) Subwords to look for in the token
        stream.  Either this or ``word_list_path`` must be specified.
    :param word_list_path:
        (Required*, string) Path to a UTF-8 encoded file containing one
        subword per line, absolute or relative to the config location.
        Either this or ``word_list`` must be specified.
    :param max_subword_size:
        (Optional, integer) Maximum subword character length; longer subword
        tokens are excluded from the output.  ES default: 15.
    :param min_subword_size:
        (Optional, integer) Minimum subword character length; shorter
        subword tokens are excluded from the output.  ES default: 2.
    :param min_word_size:
        (Optional, integer) Minimum word character length; shorter word
        tokens are excluded from the output.  ES default: 5.
    :param only_longest_match:
        (Optional, Boolean) If True, only include the longest matching
        subword.  ES default: False.
    """
    # BUG FIX: was mistakenly "decimal_digit" (copy/paste); the ES filter
    # type is "dictionary_decompounder".
    type: str = "dictionary_decompounder"

    def __init__(
            self, name: str, word_list: List[str] = None, word_list_path: str = None, max_subword_size: int = None,
            min_subword_size: int = None, min_word_size: int = None, only_longest_match: bool = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._word_list: List[str] = word_list
        self._word_list_path: str = word_list_path
        self._max_subword_size: Optional[int] = max_subword_size
        self._min_subword_size: Optional[int] = min_subword_size
        self._min_word_size: Optional[int] = min_word_size
        self._only_longest_match: Optional[bool] = only_longest_match
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._word_list:
            body["word_list"] = self._word_list
        if self._word_list_path:
            body["word_list_path"] = self._word_list_path
        # BUG FIX: compare against None so explicit 0 / False values are still
        # emitted — truthiness checks silently dropped them, and the rest of
        # this module uses `is not None` for numeric/boolean parameters.
        if self._max_subword_size is not None:
            body["max_subword_size"] = self._max_subword_size
        if self._min_subword_size is not None:
            body["min_subword_size"] = self._min_subword_size
        if self._min_word_size is not None:
            body["min_word_size"] = self._min_word_size
        if self._only_longest_match is not None:
            body["only_longest_match"] = self._only_longest_match
        return body


class ElisionTokenFilter(TokenFilter):
    """
    Elision token filter (``elision``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-elision-tokenfilter.html

    Removes specified elisions from the beginning of tokens; e.g.
    ``l'avion`` becomes ``avion``.  When not customized, the filter removes
    the following French elisions by default:
    l', m', t', qu', n', s', j', d', c', jusqu', quoiqu', lorsqu', puisqu'.
    Customized versions are included in Elasticsearch's built-in Catalan,
    French, Irish, and Italian analyzers.  Backed by Lucene's ElisionFilter.

    :param articles:
        (Required*, array of strings) Elisions to remove.  An elision is
        removed only when it is at the beginning of a token and is
        immediately followed by an apostrophe; both the elision and the
        apostrophe are removed.  For custom elision filters, either this or
        ``articles_path`` must be specified.
    :param articles_path:
        (Required*, string) Path to a UTF-8 encoded file containing one
        elision per line, absolute or relative to the config location.  For
        custom elision filters, either this or ``articles`` must be
        specified.
    :param articles_case:
        (Optional, Boolean) If True, elision matching is case-insensitive.
        ES default: False.
    """
    type: str = "elision"

    def __init__(
            self, name: str, articles: List[str] = None, articles_path: str = None, articles_case: bool = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._articles: Optional[List[str]] = articles
        self._articles_path: Optional[str] = articles_path
        self._articles_case: Optional[bool] = articles_case

    def _build(self) -> Dict:
        opts: Dict = super()._build()
        if self._articles is not None:
            opts.update(articles=self._articles)
        if self._articles_path is not None:
            opts.update(articles_path=self._articles_path)
        if self._articles_case is not None:
            opts.update(articles_case=self._articles_case)
        return opts


class FingerprintTokenFilter(TokenFilter):
    """
    Fingerprint token filter (``fingerprint``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-fingerprint-tokenfilter.html

    Sorts and removes duplicate tokens from a token stream, then
    concatenates the stream into a single output token.  For example,
    [ the, fox, was, very, very, quick ] is:
        1. sorted alphabetically to [ fox, quick, the, very, very, was ];
        2. deduplicated (the repeated ``very`` is dropped);
        3. concatenated into the single token [fox quick the very was].
    Useful for fingerprinting and clustering text, as described in the
    OpenRefine project.  Backed by Lucene's FingerprintFilter.

    :param max_output_size:
        (Optional, integer) Maximum character length, including whitespace,
        of the output token.  Concatenated tokens longer than this produce
        no token output.  ES default: 255.
    :param separator:
        (Optional, string) Character used to concatenate the token stream
        input.  ES default: a space.
    """
    type: str = "fingerprint"

    def __init__(
            self, name: str, max_output_size: int = None, separator: str = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._max_output_size: Optional[int] = max_output_size
        self._separator: Optional[str] = separator

    def _build(self) -> Dict:
        opts: Dict = super()._build()
        if self._max_output_size is not None:
            opts.update(max_output_size=self._max_output_size)
        if self._separator:
            opts.update(separator=self._separator)
        return opts


class FlattenGraphTokenFilter(TokenFilter):
    """
    Flatten graph token filter (``flatten_graph``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-flatten-graph-tokenfilter.html

    Flattens a token graph produced by a graph token filter, such as
    ``synonym_graph`` or ``word_delimiter_graph``.  Flattening a token graph
    containing multi-position tokens makes the graph suitable for indexing;
    indexing does not otherwise support such graphs.

    This filter takes no parameters.
    """
    type: str = "flatten_graph"

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        # No filter-specific parameters beyond the base body.
        return super()._build()


class HunspellTokenFilter(TokenFilter):
    """
    Hunspell token filter (``hunspell``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-hunspell-tokenfilter.html

    Provides dictionary stemming based on a provided Hunspell dictionary.
    Requires one or more language-specific Hunspell dictionaries to be
    configured.  Backed by Lucene's HunspellStemFilter.

    :param dictionary:
        (Optional, string or array of strings) One or more ``.dic`` files
        (e.g. ``en_US.dic``, ``my_custom.dic``) to use.  By default, all
        ``.dic`` files in ``<$ES_PATH_CONF>/hunspell/<locale>`` are used,
        where ``<locale>`` comes from ``lang``/``language``/``locale``.
    :param dedup:
        (Optional, Boolean) If True, duplicate tokens are removed from the
        output.  ES default: True.
    :param lang:
        (Required*, string) Alias for ``locale``.  One of ``lang``,
        ``language`` or ``locale`` must be given.
    :param language:
        (Required*, string) Alias for ``locale``.  One of ``lang``,
        ``language`` or ``locale`` must be given.
    :param locale:
        (Required*, string) Locale directory specifying the ``.aff`` and
        ``.dic`` files for a Hunspell dictionary.  One of ``lang``,
        ``language`` or ``locale`` must be given.
    :param longest_only:
        (Optional, Boolean) If True, only the longest stemmed version of
        each token is output; otherwise all stemmed versions are included.
        ES default: False.
    """
    type: str = "hunspell"

    def __init__(
            self, name: str, dictionary: Union[str, List[str]] = None, dedup: bool = None, lang: str = None,
            language: str = None, locale: str = None, longest_only: bool = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._dictionary: Union[str, List[str]] = dictionary
        self._dedup: Optional[bool] = dedup
        self._lang: Optional[str] = lang
        self._language: Optional[str] = language
        self._locale: Optional[str] = locale
        self._longest_only: Optional[bool] = longest_only

    def _build(self) -> Dict:
        opts: Dict = super()._build()
        if self._dictionary:
            opts.update(dictionary=self._dictionary)
        if self._dedup is not None:
            opts.update(dedup=self._dedup)
        if self._lang:
            opts.update(lang=self._lang)
        if self._language:
            opts.update(language=self._language)
        if self._locale:
            opts.update(locale=self._locale)
        if self._longest_only is not None:
            opts.update(longest_only=self._longest_only)
        return opts


class HyphenationDecompounderTokenFilter(DictionaryDecompounderTokenFilter):
    """
    Hyphenation decompounder token filter (``hyphenation_decompounder``).
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-hyp-decomp-tokenfilter.html

    Uses XML-based hyphenation patterns to find potential subwords in
    compound words, then checks them against the specified word list;
    subwords not in the list are excluded from the token output.  Backed by
    Lucene's HyphenationCompoundWordTokenFilter, which was built for
    Germanic languages.

    :param hyphenation_patterns_path:
        (Required, string) Path to an Apache FOP (Formatting Objects
        Processor) XML hyphenation pattern file, absolute or relative to the
        config location.  Only FOP v1.2 compatible files are supported.
    :param word_list:
        (Required*, array of strings) Subwords to look for in the token
        stream.  Either this or ``word_list_path`` must be specified.
    :param word_list_path:
        (Required*, string) Path to a UTF-8 encoded file containing one
        subword per line, absolute or relative to the config location.
        Either this or ``word_list`` must be specified.
    :param max_subword_size:
        (Optional, integer) Maximum subword character length; longer subword
        tokens are excluded.  ES default: 15.
    :param min_subword_size:
        (Optional, integer) Minimum subword character length; shorter
        subword tokens are excluded.  ES default: 2.
    :param min_word_size:
        (Optional, integer) Minimum word character length; shorter word
        tokens are excluded.  ES default: 5.
    :param only_longest_match:
        (Optional, Boolean) If True, only include the longest matching
        subword.  ES default: False.
    """
    type: str = "hyphenation_decompounder"

    def __init__(
            self, name: str, hyphenation_patterns_path: str = None, word_list: List[str] = None,
            word_list_path: str = None, max_subword_size: int = None, min_subword_size: int = None,
            min_word_size: int = None, only_longest_match: bool = None, **kwargs
    ):
        # The word-list/size parameters are handled by the parent class.
        super().__init__(
            name=name, word_list=word_list, word_list_path=word_list_path, max_subword_size=max_subword_size,
            min_subword_size=min_subword_size, min_word_size=min_word_size, only_longest_match=only_longest_match,
            **kwargs
        )
        self._hyphenation_patterns_path: Optional[str] = hyphenation_patterns_path

    def _build(self) -> Dict:
        opts: Dict = super()._build()
        if self._hyphenation_patterns_path:
            opts.update(hyphenation_patterns_path=self._hyphenation_patterns_path)
        return opts


class KeepTypesTokenFilter(TokenFilter):
    """
    Keep types token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-keep-types-tokenfilter.html

    Keeps or removes tokens of a specific type. For example, you can use this filter to change
    3 quick foxes to quick foxes by keeping only <ALPHANUM> (alphanumeric) tokens.

    :param types:
        (Required, array of strings) List of token types to keep or remove.
    :param mode:
        (Optional, string) Indicates whether to keep or remove the specified token types.
        Valid values are:
            include
                (Default) Keep only the specified token types.
            exclude
                Remove the specified token types.
    :param word_list:
        Deprecated alias of ``mode``, kept for backward compatibility with earlier versions
        of this class. The Elasticsearch parameter is named ``mode`` and the built body always
        uses the ``mode`` key. Ignored when ``mode`` is also given.
    """
    type: str = "keep_types"

    def __init__(
            self, name: str, types: List[str] = None, word_list: Literal["include", "exclude"] = None,
            mode: Literal["include", "exclude"] = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._types: List[str] = types
        # Bug fix: the 7.17 keep_types filter has no "word_list" parameter; the
        # include/exclude switch is called "mode". Accept the legacy argument name
        # but serialize it under the correct key.
        self._mode: Optional[Literal["include", "exclude"]] = mode if mode is not None else word_list
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._types:
            body["types"] = self._types
        if self._mode:
            body["mode"] = self._mode
        return body


class KeepWordsTokenFilter(TokenFilter):
    """
    Keep words token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-keep-words-tokenfilter.html

    Keeps only tokens contained in a specified word list.
    This filter uses Lucene's KeepWordFilter.

    :param keep_words:
        (Required*, array of strings) List of words to keep. Only tokens that match words in
        this list are included in the output.
        Either this parameter or keep_words_path must be specified.
    :param keep_words_path:
        (Required*, string) Path to a file that contains a list of words to keep. Only tokens
        that match words in this list are included in the output.
        This path must be absolute or relative to the config location, and the file must be
        UTF-8 encoded. Each word in the file must be separated by a line break.
        Either this parameter or keep_words must be specified.
    :param keep_words_case:
        (Optional, Boolean) If true, lowercase all keep words. Defaults to false.
    """
    type: str = "keep"

    def __init__(
            self, name: str, keep_words: List[str] = None, keep_words_path: str = None,
            keep_words_case: bool = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._keep_words: Optional[List[str]] = keep_words
        # Annotation fix: keep_words_path is a single file-system path (string), not a
        # list of words -- see the parameter description above.
        self._keep_words_path: Optional[str] = keep_words_path
        self._keep_words_case: Optional[bool] = keep_words_case
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._keep_words:
            body["keep_words"] = self._keep_words
        if self._keep_words_path:
            body["keep_words_path"] = self._keep_words_path
        # Truthy guard: an explicit False is omitted, matching the server-side default.
        if self._keep_words_case:
            body["keep_words_case"] = self._keep_words_case
        return body


class KeywordMarkerTokenFilter(TokenFilter):
    """
    Keyword marker token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-keyword-marker-tokenfilter.html

    Marks specified tokens as keywords, which are not stemmed. The keyword_marker filter
    assigns matching tokens a keyword attribute of true; stemmer token filters (such as
    stemmer or porter_stem) skip tokens whose keyword attribute is true.

    :param ignore_case:
        (Optional, Boolean) If true, matching for the keywords and keywords_path parameters
        ignores letter case. Defaults to false.
    :param keywords:
        (Required*, array of strings) Array of keywords. Tokens matching these keywords are
        not stemmed. One of keywords, keywords_path, or keywords_pattern must be specified;
        this parameter cannot be combined with keywords_pattern.
    :param keywords_path:
        (Required*, string) Path to a UTF-8 file containing one keyword per line. One of
        keywords, keywords_path, or keywords_pattern must be specified; this parameter cannot
        be combined with keywords_pattern.
    :param keywords_pattern:
        (Required*, string) Java regular expression used to match tokens; matches are marked
        as keywords and not stemmed. Cannot be combined with keywords or keywords_path.
    """
    type: str = "keyword_marker"

    def __init__(
            self, name: str, ignore_case: bool = None, keywords: List[str] = None, keywords_path: str = None,
            keywords_pattern: str = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._ignore_case: Optional[bool] = ignore_case
        self._keywords: Optional[List[str]] = keywords
        self._keywords_path: Optional[str] = keywords_path
        self._keywords_pattern: Optional[str] = keywords_pattern

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # Truthy guards: unset / falsy parameters are simply left out of the body.
        optional_settings = (
            ("ignore_case", self._ignore_case),
            ("keywords", self._keywords),
            ("keywords_path", self._keywords_path),
            ("keywords_pattern", self._keywords_pattern),
        )
        for key, value in optional_settings:
            if value:
                body[key] = value
        return body


class KeywordRepeatTokenFilter(TokenFilter):
    """
    Keyword repeat token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-keyword-repeat-tokenfilter.html

    Outputs a keyword version of each token in a stream; these keyword tokens are not stemmed.
    The keyword_repeat filter assigns keyword tokens a keyword attribute of true, which stemmer
    token filters (such as stemmer or porter_stem) skip. Combine with a stemmer filter to emit
    both a stemmed and an unstemmed version of every token.
    """
    type: str = "keyword_repeat"

    def __init__(self, name: str, **kwargs):
        # No filter-specific parameters; only the registration name is needed.
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return body


class KStemTokenFilter(TokenFilter):
    """
    KStem token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-kstem-tokenfilter.html

    Provides KStem-based stemming for the English language, combining algorithmic stemming
    with a built-in dictionary. It tends to stem less aggressively than other English stemmer
    filters such as porter_stem, and is equivalent to the stemmer filter's light_english
    variant. Uses Lucene's KStemFilter.
    """
    type: str = "kstem"

    def __init__(self, name: str, **kwargs):
        # No filter-specific parameters; only the registration name is needed.
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return body


class LengthTokenFilter(TokenFilter):
    """
    Length token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-length-tokenfilter.html

    Removes tokens shorter or longer than specified character lengths. For example, you can
    exclude tokens shorter than 2 characters and tokens longer than 5 characters.
    Uses Lucene's LengthFilter.

    :param min_:
        (Optional, integer) Minimum character length of a token; shorter tokens are excluded.
        Defaults to 0. (Named with a trailing underscore to avoid shadowing the builtin.)
    :param max_:
        (Optional, integer) Maximum character length of a token; longer tokens are excluded.
        Defaults to Integer.MAX_VALUE (2^31 - 1 = 2147483647).
    """
    type: str = "length"

    def __init__(self, name: str, min_: int = None, max_: int = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._min: Optional[int] = min_
        self._max: Optional[int] = max_

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # "is not None" guards: 0 is a meaningful bound and must still be emitted.
        for key, value in (("min", self._min), ("max", self._max)):
            if value is not None:
                body[key] = value
        return body


class LimitTokenCountTokenFilter(TokenFilter):
    """
    Limit token count token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-limit-token-count-tokenfilter.html

    Limits the number of output tokens; commonly used to cap the size of document field values
    by token count. By default only the first token of a stream is kept, e.g. [one, two, three]
    becomes [one]. Uses Lucene's LimitTokenCountFilter.

    :param max_token_count:
        (Optional, integer) Maximum number of tokens to keep; once reached, remaining tokens
        are excluded from the output. Defaults to 1.
    :param consume_all_tokens:
        (Optional, Boolean) If true, the filter exhausts the token stream even after
        max_token_count has been reached. Defaults to false.
    """
    type: str = "limit"

    def __init__(self, name: str, max_token_count: int = None, consume_all_tokens: bool = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._max_token_count: Optional[int] = max_token_count
        self._consume_all_tokens: Optional[bool] = consume_all_tokens

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # "is not None" guards so that explicit 0 / False values are still serialized.
        settings = (
            ("max_token_count", self._max_token_count),
            ("consume_all_tokens", self._consume_all_tokens),
        )
        for key, value in settings:
            if value is not None:
                body[key] = value
        return body


class LowercaseTokenFilter(TokenFilter):
    """
    Lowercase token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-lowercase-tokenfilter.html

    Changes token text to lowercase, e.g. THE Lazy DoG becomes the lazy dog. Besides the
    default filter, it exposes Lucene's language-specific lowercase filters for Greek, Irish,
    and Turkish.

    :param language:
        (Optional, string) Language-specific lowercase filter to use; per the class description
        Greek, Irish, and Turkish variants exist. When omitted, the default filter is used.
    """
    type: str = "lowercase"

    def __init__(self, name: str, language: str = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._language: Optional[str] = language

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # Truthy guard: an empty string behaves like None and is omitted.
        if self._language:
            body["language"] = self._language
        return body


class MinHashTokenFilter(TokenFilter):
    """
    MinHash token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-minhash-tokenfilter.html

    Uses the MinHash technique to produce a signature for a token stream, which can be used to
    estimate document similarity. The min_hash filter, in order:
        1. hashes each token in the stream;
        2. assigns the hashes to buckets, keeping only the smallest hashes of each bucket;
        3. outputs the smallest hash from each bucket as a token stream.
    Uses Lucene's MinHashFilter.

    :param bucket_count:
        (Optional, integer) Number of buckets to which hashes are assigned. Defaults to 512.
    :param hash_count:
        (Optional, integer) Number of ways to hash each token in the stream. Defaults to 1.
    :param hash_set_size:
        (Optional, integer) Number of hashes to keep from each bucket, retained by ascending
        size starting with the bucket's smallest hash. Defaults to 1.
    :param with_rotation:
        (Optional, Boolean) If true and hash_set_size is 1, empty buckets are filled with the
        value of the first non-empty bucket to their circular right. Defaults to true when
        bucket_count is greater than 1, otherwise false.
    """
    type: str = "min_hash"

    def __init__(self, name: str, bucket_count: int = None, hash_count: int = None, hash_set_size: int = None,
                 with_rotation: bool = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._bucket_count: Optional[int] = bucket_count
        self._hash_count: Optional[int] = hash_count
        self._hash_set_size: Optional[int] = hash_set_size
        self._with_rotation: Optional[bool] = with_rotation

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # "is not None" guards so explicit 0 / False values are still serialized.
        settings = (
            ("bucket_count", self._bucket_count),
            ("hash_count", self._hash_count),
            ("hash_set_size", self._hash_set_size),
            ("with_rotation", self._with_rotation),
        )
        for key, value in settings:
            if value is not None:
                body[key] = value
        return body


class MultiplexerTokenFilter(AsciiFoldingTokenFilter):
    """
    Multiplexer token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-multiplexer-tokenfilter.html

    A token filter of type multiplexer emits multiple tokens at the same position, each version
    of the token having been run through a different filter. Identical output tokens at the
    same position are removed.

    :param filters:
        A list of token filters to apply to incoming tokens. These can be any token filters
        defined elsewhere in the index mappings, referenced by name; filters can also be
        chained with a comma-delimited string, e.g. "lowercase, porter_stem" applies the
        lowercase filter and then the porter_stem filter to a single token. ``TokenFilter``
        instances are serialized by their registered name.
    :param preserve_original:
        If true (the default), emit the original token in addition to the filtered tokens.
    """
    type: str = "multiplexer"

    def __init__(
            self, name: str, filters: List[Union[str, TokenFilter]] = None, preserve_original: bool = None, **kwargs
    ):
        super().__init__(name=name, preserve_original=preserve_original, **kwargs)
        self._filters: List[Union[str, TokenFilter]] = filters
        self._preserve_original: bool = preserve_original
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._filters:
            # Bug fix: the "filters" array references token filters *defined elsewhere in the
            # index mappings*, i.e. by their registered name. Serializing `.type` would emit
            # the built-in type string (e.g. "ngram") and lose the reference to the custom
            # filter definition, so TokenFilter instances are emitted via `.name`.
            body["filters"] = [_f.name if isinstance(_f, TokenFilter) else _f for _f in self._filters]
        if self._preserve_original is not None:
            body["preserve_original"] = self._preserve_original
        return body


class NGramTokenFilter(AsciiFoldingTokenFilter):
    """
    N-gram token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-ngram-tokenfilter.html

    Forms n-grams of specified lengths from a token, e.g. fox becomes [f, fo, o, ox, x].
    Uses Lucene's NGramTokenFilter.

    :param max_gram:
        (Optional, integer) Maximum length of characters in a gram. Defaults to 2.
    :param min_gram:
        (Optional, integer) Minimum length of characters in a gram. Defaults to 1.
    :param preserve_original:
        (Optional, Boolean) Emits the original token when set to true. Defaults to false.
        Handled by the parent class.
    """
    type: str = "ngram"

    def __init__(self, name: str, max_gram: int = None, min_gram: int = None, preserve_original: bool = None, **kwargs):
        # preserve_original is stored and serialized by the parent class.
        super().__init__(name=name, preserve_original=preserve_original, **kwargs)
        self._max_gram: Optional[int] = max_gram
        self._min_gram: Optional[int] = min_gram

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # "is not None" guards so an explicit 0 would still be serialized.
        for key, value in (("max_gram", self._max_gram), ("min_gram", self._min_gram)):
            if value is not None:
                body[key] = value
        return body


class EdgeNGramTokenFilter(NGramTokenFilter):
    """
    Edge n-gram token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-edgengram-tokenfilter.html

    Forms an n-gram of a specified length from the beginning of a token, e.g. quick becomes
    qu. When not customized, the filter creates 1-character edge n-grams by default.
    Uses Lucene's EdgeNGramTokenFilter.

    :param max_gram:
        (Optional, integer) Maximum character length of a gram. For custom token filters,
        defaults to 2; for the built-in edge_ngram filter, defaults to 1. See the limitations
        of the max_gram parameter in the reference documentation.
    :param min_gram:
        (Optional, integer) Minimum character length of a gram. Defaults to 1.
    :param preserve_original:
        (Optional, Boolean) Emits the original token when set to true. Defaults to false.
    :param side:
        (Optional, string) Deprecated. Indicates whether to truncate tokens from the front or
        back. Defaults to front. Instead of the back value, use the reverse token filter
        before and after edge_ngram to achieve the same result.
    """
    type: str = "edge_ngram"

    def __init__(
            self, name: str, max_gram: int = None, min_gram: int = None, preserve_original: bool = None,
            side: str = None, **kwargs
    ):
        # max_gram / min_gram / preserve_original are stored and serialized by the parents.
        super().__init__(name=name, max_gram=max_gram, min_gram=min_gram, preserve_original=preserve_original, **kwargs)
        self._side: Optional[str] = side

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # Truthy guard: an empty string behaves like None and is omitted.
        if self._side:
            body["side"] = self._side
        return body


class NormalizationTokenFilter(TokenFilter):
    """
    Normalization token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-normalization-tokenfilter.html

    There are several token filters available which try to normalize special characters of a
    certain language.

    NOTE(review): original author marked this class "todo" -- the language-specific variants
    are not modeled yet, and the generic type string below may need to be revisited against
    the per-language filter names in the reference documentation.
    """
    type: str = "normalize"

    def __init__(self, name: str, **kwargs):
        # No filter-specific parameters modeled yet; only the registration name is needed.
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return body


class PatternCaptureTokenFilter(AsciiFoldingTokenFilter):
    """
    Pattern capture token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-pattern-capture-tokenfilter.html

    Unlike the pattern tokenizer, the pattern_capture token filter emits a token for every
    capture group in the regular expression. Patterns are not anchored to the beginning and
    end of the string, so each pattern can match multiple times, and matches may overlap.

    :param preserve_original:
        (Optional, Boolean) If true, emit both original tokens and captured tokens. Defaults
        to false. Handled by the parent class.
    :param patterns:
        List of regular expressions whose capture groups produce the emitted tokens.
    """
    type: str = "pattern_capture"

    def __init__(self, name: str, preserve_original: bool = None, patterns: List[str] = None, **kwargs):
        # preserve_original is stored and serialized by the parent class.
        super().__init__(name=name, preserve_original=preserve_original, **kwargs)
        self._patterns: Optional[List[str]] = patterns

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # Truthy guard: an empty pattern list is omitted like None.
        if self._patterns:
            body["patterns"] = self._patterns
        return body


class PatternReplaceTokenFilter(TokenFilter):
    """
    Pattern replace token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-pattern_replace-tokenfilter.html

    Uses a regular expression to match and replace token substrings. The pattern_replace
    filter uses Java's regular expression syntax; by default, matching substrings are replaced
    with an empty substring (""). Replacement substrings can use Java's $g syntax to reference
    capture groups from the original token text.

    :param all_:
        (Optional, Boolean) If true, all substrings matching the pattern parameter's regular
        expression are replaced; if false, only the first matching substring in each token.
        Defaults to true. (Named with a trailing underscore to avoid shadowing the builtin.)
    :param pattern:
        (Required, string) Regular expression, written in Java's regular expression syntax.
        Token substrings matching this pattern are replaced with the replacement parameter's
        substring.
    :param replacement:
        (Optional, string) Replacement substring. Defaults to an empty substring ("").
    """
    type: str = "pattern_replace"

    def __init__(self, name: str, all_: bool = None, pattern: str = None, replacement: str = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._all: Optional[bool] = all_
        self._pattern: Optional[str] = pattern
        self._replacement: Optional[str] = replacement

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # "is not None" guard so an explicit False (replace only first match) is emitted.
        if self._all is not None:
            body["all"] = self._all
        # Truthy guards: empty strings are omitted like None.
        if self._pattern:
            body["pattern"] = self._pattern
        if self._replacement:
            body["replacement"] = self._replacement
        return body


class PhoneticTokenFilter(TokenFilter):
    """
    Phonetic token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-phonetic-tokenfilter.html

    The phonetic token filter is provided as the analysis-phonetic plugin.
    """
    type: str = "phonetic"

    def __init__(self, name: str, **kwargs):
        # No filter-specific parameters modeled; only the registration name is needed.
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return body


class PorterStemTokenFilter(TokenFilter):
    """
    Porter stem token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-porterstem-tokenfilter.html

    Provides algorithmic stemming for the English language, based on the Porter stemming
    algorithm. It tends to stem more aggressively than other English stemmer filters such as
    kstem, and is equivalent to the stemmer filter's english variant. Uses Lucene's
    PorterStemFilter.
    """
    type: str = "porter_stem"

    def __init__(self, name: str, **kwargs):
        # No filter-specific parameters; only the registration name is needed.
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return body


class PredicateScriptTokenFilter(TokenFilter):
    """
    Predicate script token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-predicatefilter-tokenfilter.html

    Removes tokens that don't match a provided predicate script. The filter supports inline
    Painless scripts only; scripts are evaluated in the analysis predicate context.

    :param script:
        (Required, script object) Script containing a condition used to filter incoming
        tokens; only tokens matching the script are included in the output. Accepted as a
        ``Script`` instance, a raw Painless source string, or a pre-built script dict.
    """
    type: str = "predicate_token_filter"

    def __init__(self, name: str, script: Union[str, Dict, Script], **kwargs):
        super().__init__(name=name, **kwargs)
        self._script: Union[str, Dict, Script] = script

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._script:
            # Normalize the three accepted forms into the dict shape ES expects.
            if isinstance(self._script, Script):
                script_body = self._script._build()
            elif isinstance(self._script, str):
                script_body = {"source": self._script}
            else:
                script_body = self._script
            body["script"] = script_body
        return body


class RemoveDuplicatesTokenFilter(TokenFilter):
    """
    Remove duplicates token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-remove-duplicates-tokenfilter.html

    Removes duplicate tokens in the same position. Uses Lucene's RemoveDuplicatesTokenFilter.
    """
    type: str = "remove_duplicates"

    def __init__(self, name: str, **kwargs):
        # No filter-specific parameters; only the registration name is needed.
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return body


class ReverseTokenFilter(TokenFilter):
    """
    Reverse token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-reverse-tokenfilter.html

    Reverses each token in a stream, e.g. cat becomes tac. Reversed tokens are useful for
    suffix-based searches, such as finding words that end in -ion or searching file names by
    their extension. Uses Lucene's ReverseStringFilter.
    """
    type: str = "reverse"

    def __init__(self, name: str, **kwargs):
        # No filter-specific parameters; only the registration name is needed.
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return body


class ShingleTokenFilter(TokenFilter):
    """
    Shingle token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-shingle-tokenfilter.html

    Adds shingles (word n-grams) to a token stream by concatenating adjacent
    tokens. By default the filter outputs two-word shingles and unigrams: many
    tokenizers convert ``the lazy dog`` to ``[ the, lazy, dog ]``; the shingle
    filter adds ``[ the, the lazy, lazy, lazy dog, dog ]``. Wraps Lucene's
    ShingleFilter.

    :param min_shingle_size:
        (Optional, integer) Minimum number of tokens to concatenate when
        creating shingles. Defaults to 2.
    :param output_unigrams:
        (Optional, Boolean) If true, the output includes the original input
        tokens; if false, only shingles are emitted. Defaults to true.
    :param output_unigrams_if_no_shingles:
        (Optional, Boolean) If true, emit the original input tokens only when
        no shingles are produced. Defaults to false.
    :param token_separator:
        (Optional, string) Separator used to concatenate adjacent tokens into
        a shingle. Defaults to a space (" "). An empty string is a valid
        value (tokens are joined directly).
    :param filler_token:
        (Optional, string) String used in shingles as a replacement for empty
        positions that do not contain a token (e.g. positions emptied by a
        ``stop`` filter). Only used in shingles, not original unigrams.
        Defaults to an underscore ("_"). An empty string is a valid value.
    :param max_shingle_size:
        (Optional, integer) Maximum number of tokens to concatenate when
        creating shingles. Defaults to 2. (Appended after the original
        parameters to stay backward compatible with positional callers.)
    """
    type: str = "shingle"

    def __init__(
            self, name: str, min_shingle_size: int = None, output_unigrams: bool = None,
            output_unigrams_if_no_shingles: bool = None, token_separator: str = None, filler_token: str = None,
            max_shingle_size: int = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._min_shingle_size: Optional[int] = min_shingle_size
        self._max_shingle_size: Optional[int] = max_shingle_size
        self._output_unigrams: Optional[bool] = output_unigrams
        self._output_unigrams_if_no_shingles: Optional[bool] = output_unigrams_if_no_shingles
        self._token_separator: Optional[str] = token_separator
        self._filler_token: Optional[str] = filler_token
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._min_shingle_size is not None:
            body["min_shingle_size"] = self._min_shingle_size
        if self._max_shingle_size is not None:
            body["max_shingle_size"] = self._max_shingle_size
        if self._output_unigrams is not None:
            body["output_unigrams"] = self._output_unigrams
        if self._output_unigrams_if_no_shingles is not None:
            body["output_unigrams_if_no_shingles"] = self._output_unigrams_if_no_shingles
        # `is not None` (not truthiness): "" is a meaningful separator/filler
        # value and must not be silently dropped.
        if self._token_separator is not None:
            body["token_separator"] = self._token_separator
        if self._filler_token is not None:
            body["filler_token"] = self._filler_token
        return body


class SnowballTokenFilter(TokenFilter):
    """
    Snowball token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-snowball-tokenfilter.html

    A filter that stems words using a Snowball-generated stemmer. The
    ``language`` parameter controls the stemmer, with the following available
    values: Arabic, Armenian, Basque, Catalan, Danish, Dutch, English,
    Estonian, Finnish, French, German, German2, Hungarian, Italian, Irish, Kp,
    Lithuanian, Lovins, Norwegian, Porter, Portuguese, Romanian, Russian,
    Spanish, Swedish, Turkish.

    :param language:
        (Optional, string) Language whose Snowball stemmer should be applied.
        (The original class documented this option but did not accept it;
        added with a None default for backward compatibility.)
    """
    type: str = "snowball"

    def __init__(self, name: str, language: str = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._language: Optional[str] = language
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._language:
            body["language"] = self._language
        return body


class StemmerTokenFilter(TokenFilter):
    """
    Stemmer token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-stemmer-tokenfilter.html

    Provides algorithmic stemming for several languages, some with additional
    variants; see the ``language`` parameter for the supported list. When not
    customized, the filter uses the porter stemming algorithm for English.

    :param language:
        (Optional, string) Language-dependent stemming algorithm used to stem
        tokens. If both this and ``name_`` are given, ``language`` wins.
    :param name_:
        An alias for ``language`` (serialized as the ES ``name`` option;
        trailing underscore avoids clashing with the filter's own ``name``).
    """
    type: str = "stemmer"

    def __init__(self, name: str, language: str = None, name_: str = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._language: str = language
        self._name: str = name_

    def _build(self) -> Dict:
        body: Dict = super()._build()
        for key, value in (("language", self._language), ("name", self._name)):
            if value:
                body[key] = value
        return body


class StemmerOverrideTokenFilter(TokenFilter):
    """
    Stemmer-override token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-stemmer-override-tokenfilter.html

    Overrides stemming algorithms by applying a custom mapping, then protects
    the mapped terms from being modified by stemmers. Must be placed before
    any stemming filters. Rules are mappings of the form
    ``token1[, ..., tokenN] => override``.

    :param rules:
        A list of mapping rules to use.
    :param rules_path:
        A path (relative to the config location, or absolute) to a file of
        mappings.
    """
    type: str = "stemmer_override"

    def __init__(self, name: str, rules: List[str] = None, rules_path: str = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._rules: List[str] = rules
        self._rules_path: str = rules_path

    def _build(self) -> Dict:
        body: Dict = super()._build()
        for key, value in (("rules", self._rules), ("rules_path", self._rules_path)):
            if value:
                body[key] = value
        return body


class StopTokenFilter(TokenFilter):
    """
    Stop token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-stop-tokenfilter.html

    Removes stop words from a token stream. When not customized, the filter
    removes the following English stop words by default:
    a, an, and, are, as, at, be, but, by, for, if, in, into, is, it, no, not,
    of, on, or, such, that, the, their, then, there, these, they, this, to,
    was, will, with.
    In addition to English, the filter supports predefined stop word lists for
    several languages; you can also specify your own stop words as an array or
    file. Wraps Lucene's StopFilter.

    :param stopwords:
        (Optional, string or array of strings) Either a predefined language
        value such as ``_arabic_`` or ``_thai_`` (defaults to ``_english_``;
        use ``_none_`` for an empty list), or an explicit array of stop words.
        (Annotation widened to ``Union[str, List[str]]`` to match this
        documented contract; the original ``List[str]`` excluded the language
        string form.)
    :param stopwords_path:
        (Optional, string) Path to a file containing stop words to remove.
        Must be absolute or relative to the config location; the file must be
        UTF-8 encoded with one stop word per line.
    :param ignore_case:
        (Optional, Boolean) If true, stop word matching is case insensitive
        (e.g. a stop word ``the`` also removes ``The`` and ``THE``).
        Defaults to false.
    :param remove_trailing:
        (Optional, Boolean) If true, the last token of a stream is removed if
        it is a stop word. Defaults to true. Should be false when used with a
        completion suggester, so a query like ``green a`` can still suggest
        ``green apple``.
    """
    type: str = "stop"

    def __init__(
            self, name: str, stopwords: Union[str, List[str]] = None, stopwords_path: str = None,
            ignore_case: bool = None, remove_trailing: bool = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._stopwords: Optional[Union[str, List[str]]] = stopwords
        self._stopwords_path: Optional[str] = stopwords_path
        self._ignore_case: Optional[bool] = ignore_case
        self._remove_trailing: Optional[bool] = remove_trailing
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._stopwords:
            body["stopwords"] = self._stopwords
        if self._stopwords_path:
            body["stopwords_path"] = self._stopwords_path
        if self._ignore_case is not None:
            body["ignore_case"] = self._ignore_case
        if self._remove_trailing is not None:
            body["remove_trailing"] = self._remove_trailing
        return body


class SynonymTokenFilter(TokenFilter):
    """
    Synonym token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-synonym-tokenfilter.html

    The synonym token filter allows to easily handle synonyms during the
    analysis process. Synonyms can be configured inline or via a
    configuration file.

    :param format_:
        (Optional, "solr" or "wordnet") Format of the synonym rules.
        Trailing underscore avoids shadowing the builtin ``format``.
    :param synonyms:
        (Optional, array of strings) Inline synonym rules.
    :param lenient:
        (Optional, Boolean) If true, ignore exceptions while parsing the
        synonym configuration.
    :param tokenizer:
        (Optional, string) Tokenizer used to tokenize the synonyms.
    :param ignore_case:
        (Optional, Boolean) Whether matching is case insensitive.
    :param synonyms_path:
        (Optional, string) Path to a synonyms configuration file, relative to
        the config location or absolute. (Added for parity with
        SynonymGraphTokenFilter; appended after the original parameters to
        stay backward compatible with positional callers.)
    """
    type: str = "synonym"

    def __init__(
            self, name: str, format_: Literal["solr", "wordnet"] = None, synonyms: List[str] = None,
            lenient: bool = None, tokenizer: str = None, ignore_case: bool = None, synonyms_path: str = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._format: Optional[Literal["solr", "wordnet"]] = format_
        self._synonyms: Optional[List[str]] = synonyms
        self._synonyms_path: Optional[str] = synonyms_path
        self._lenient: Optional[bool] = lenient
        self._tokenizer: Optional[str] = tokenizer
        self._ignore_case: Optional[bool] = ignore_case
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._format:
            body["format"] = self._format
        if self._synonyms:
            body["synonyms"] = self._synonyms
        if self._synonyms_path:
            body["synonyms_path"] = self._synonyms_path
        if self._lenient is not None:
            body["lenient"] = self._lenient
        if self._tokenizer:
            body["tokenizer"] = self._tokenizer
        if self._ignore_case is not None:
            body["ignore_case"] = self._ignore_case
        return body


class SynonymGraphTokenFilter(TokenFilter):
    """
    Synonym graph token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-synonym-graph-tokenfilter.html

    The synonym_graph token filter allows to easily handle synonyms —
    including multi-word synonyms — correctly during the analysis process. To
    properly handle multi-word synonyms, this token filter creates a graph
    token stream during processing; see Lucene's "TokenStreams are actually
    graphs" blog post for background.

    :param format_:
        (Optional, "solr" or "wordnet") Format of the synonym rules.
        Trailing underscore avoids shadowing the builtin ``format``.
    :param synonyms:
        (Optional, array of strings) Inline synonym rules.
    :param synonyms_path:
        (Optional, string) Path to a synonyms configuration file, relative to
        the config location or absolute.
    :param lenient:
        (Optional, Boolean) If true, ignore exceptions while parsing the
        synonym configuration.
    :param tokenizer:
        (Optional, string) Tokenizer used to tokenize the synonyms.
    :param ignore_case:
        (Optional, Boolean) Whether matching is case insensitive.
    """
    # Fixed: the ES filter type is "synonym_graph"; the previous value
    # "graph_synonyms" is not a valid token filter type and would be rejected.
    type: str = "synonym_graph"

    def __init__(
            self, name: str, format_: Literal["solr", "wordnet"] = None, synonyms: List[str] = None,
            synonyms_path: str = None, lenient: bool = None, tokenizer: str = None, ignore_case: bool = None, **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._format: Optional[Literal["solr", "wordnet"]] = format_
        self._synonyms: Optional[List[str]] = synonyms
        self._synonyms_path: Optional[str] = synonyms_path
        self._lenient: Optional[bool] = lenient
        self._tokenizer: Optional[str] = tokenizer
        self._ignore_case: Optional[bool] = ignore_case
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._format:
            body["format"] = self._format
        if self._synonyms:
            body["synonyms"] = self._synonyms
        if self._synonyms_path:
            body["synonyms_path"] = self._synonyms_path
        if self._lenient is not None:
            body["lenient"] = self._lenient
        if self._tokenizer:
            body["tokenizer"] = self._tokenizer
        if self._ignore_case is not None:
            body["ignore_case"] = self._ignore_case
        return body


class TrimTokenFilter(TokenFilter):
    """
    Trim token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-trim-tokenfilter.html

    Removes leading and trailing whitespace from each token in a stream.
    While this can change a token's length, the filter does not change a
    token's offsets. Wraps Lucene's TrimFilter. No configurable options.
    """
    type: str = "trim"

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        # No filter-specific options to add.
        return super()._build()


class TruncateTokenFilter(TokenFilter):
    """
    Truncate token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-truncate-tokenfilter.html

    Truncates tokens exceeding a character limit (default 10, customizable via
    ``length``). For example, shortening all tokens to 3 characters turns
    ``jumping fox`` into ``jum fox``. Wraps Lucene's TruncateTokenFilter.

    :param length:
        (Optional, integer) Character limit for each token; longer tokens are
        truncated. Defaults to 10.
    """
    type: str = "truncate"

    def __init__(self, name: str, length: int = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._length: int = length

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # Explicit None check: a 0 limit, while unusual, must still be emitted.
        if self._length is not None:
            body["length"] = self._length
        return body


class UniqueTokenFilter(TokenFilter):
    """
    Unique token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-unique-tokenfilter.html

    Removes duplicate tokens from a stream, e.g. ``the lazy lazy dog`` ->
    ``the lazy dog``. With ``only_on_same_position`` set to true, only
    duplicates in the same position are removed.

    :param only_on_same_position:
        (Optional, Boolean) If true, only remove duplicate tokens in the same
        position. Defaults to false.
    """
    type: str = "unique"

    def __init__(self, name: str, only_on_same_position: bool = None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._only_on_same_position: bool = only_on_same_position

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # Explicit None check so an explicit False is still serialized.
        if self._only_on_same_position is not None:
            body["only_on_same_position"] = self._only_on_same_position
        return body


class UppercaseTokenFilter(TokenFilter):
    """
    Uppercase token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-uppercase-tokenfilter.html

    Changes token text to uppercase, e.g. ``the Lazy DoG`` -> ``THE LAZY
    DOG``. Wraps Lucene's UpperCaseFilter. No configurable options.
    """
    type: str = "uppercase"

    def __init__(self, name: str, **kwargs):
        super().__init__(name=name, **kwargs)

    def _build(self) -> Dict:
        # No filter-specific options to add.
        return super()._build()


class WordDelimiterTokenFilter(AsciiFoldingTokenFilter):
    """
    Word delimiter token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-word-delimiter-tokenfilter.html

    Splits tokens at non-alphanumeric characters and performs optional token
    normalization based on a set of rules. By default, the filter:
        1. Splits tokens at non-alphanumeric characters, using them as
           delimiters. E.g. Super-Duper -> Super, Duper
        2. Removes leading/trailing delimiters from each token.
           E.g. XL---42+'Autocoder' -> XL, 42, Autocoder
        3. Splits tokens at letter case transitions. E.g. PowerShot -> Power, Shot
        4. Splits tokens at letter-number transitions. E.g. XL500 -> XL, 500
        5. Removes the English possessive ('s) from the end of each token.
           E.g. Neil's -> Neil

    NOTE(review): inherits AsciiFoldingTokenFilter only to reuse its
    ``preserve_original`` handling; the filter itself is unrelated to ASCII
    folding.

    :param catenate_all:
        (Optional, Boolean) If true, produce catenated tokens for chains of
        alphanumeric characters separated by non-alphabetic delimiters.
        E.g. super-duper-xl-500 -> [ super, superduperxl500, duper, xl, 500 ].
        Defaults to false.
    :param catenate_numbers:
        (Optional, Boolean) If true, produce catenated tokens for chains of
        numeric characters separated by non-alphabetic delimiters.
        E.g. 01-02-03 -> [ 01, 010203, 02, 03 ]. Defaults to false.
    :param catenate_words:
        (Optional, Boolean) If true, produce catenated tokens for chains of
        alphabetical characters separated by non-alphabetic delimiters.
        E.g. super-duper-xl -> [ super, superduperxl, duper, xl ].
        Defaults to false.
    :param generate_number_parts:
        (Optional, Boolean) If true, include tokens consisting of only numeric
        characters in the output. Defaults to true.
    :param generate_word_parts:
        (Optional, Boolean) If true, include tokens consisting of only
        alphabetical characters in the output. Defaults to true.
    :param preserve_original:
        (Optional, Boolean) If true, include the original version of any split
        tokens (with delimiters) in the output. Defaults to false.
    :param protected_words:
        (Optional, array of strings) Tokens the filter won't split.
    :param protected_words_path:
        (Optional, string) Path to a UTF-8 file with one protected token per
        line; absolute or relative to the config location.
    :param split_on_case_change:
        (Optional, Boolean) If true, split tokens at letter case transitions.
        E.g. camelCase -> [ camel, Case ]. Defaults to true.
    :param split_on_numerics:
        (Optional, Boolean) If true, split tokens at letter-number
        transitions. E.g. j2se -> [ j, 2, se ]. Defaults to true.
    :param stem_english_possessive:
        (Optional, Boolean) If true, remove the English possessive ('s) from
        the end of each token. E.g. O'Neil's -> [ O, Neil ]. Defaults to true.
    :param type_table:
        (Optional, array of strings) Custom type mappings for characters, as
        rule strings such as [ "+ => ALPHA", "- => ALPHA" ]; valid target
        types are ALPHA, ALPHANUM, DIGIT, LOWER, SUBWORD_DELIM, UPPER.
        (Annotation corrected: entries are mapping-rule strings, not bare
        type names.)
    :param type_table_path:
        (Optional, string) Path to a file containing custom type mappings.
        (Annotation corrected: this is a file path, not a list.)
    """
    type: str = "word_delimiter"

    def __init__(
            self, name: str, catenate_all: bool = None, catenate_numbers: bool = None, catenate_words: bool = None,
            generate_number_parts: bool = None, generate_word_parts: bool = None, preserve_original: bool = None,
            protected_words: List[str] = None, protected_words_path: str = None, split_on_case_change: bool = None,
            split_on_numerics: bool = None, stem_english_possessive: bool = None,
            type_table: List[str] = None, type_table_path: str = None,
            **kwargs
    ):
        # preserve_original is handled by the AsciiFoldingTokenFilter base.
        super().__init__(name=name, preserve_original=preserve_original, **kwargs)
        self._catenate_all: Optional[bool] = catenate_all
        self._catenate_numbers: Optional[bool] = catenate_numbers
        self._catenate_words: Optional[bool] = catenate_words
        self._generate_number_parts: Optional[bool] = generate_number_parts
        self._generate_word_parts: Optional[bool] = generate_word_parts
        self._protected_words: Optional[List[str]] = protected_words
        self._protected_words_path: Optional[str] = protected_words_path
        self._split_on_case_change: Optional[bool] = split_on_case_change
        self._split_on_numerics: Optional[bool] = split_on_numerics
        self._stem_english_possessive: Optional[bool] = stem_english_possessive
        self._type_table: Optional[List[str]] = type_table
        self._type_table_path: Optional[str] = type_table_path
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._catenate_all is not None:
            body["catenate_all"] = self._catenate_all
        if self._catenate_numbers is not None:
            body["catenate_numbers"] = self._catenate_numbers
        if self._catenate_words is not None:
            body["catenate_words"] = self._catenate_words
        if self._generate_number_parts is not None:
            body["generate_number_parts"] = self._generate_number_parts
        if self._generate_word_parts is not None:
            body["generate_word_parts"] = self._generate_word_parts
        if self._protected_words:
            body["protected_words"] = self._protected_words
        if self._protected_words_path:
            body["protected_words_path"] = self._protected_words_path
        if self._split_on_case_change is not None:
            body["split_on_case_change"] = self._split_on_case_change
        if self._split_on_numerics is not None:
            body["split_on_numerics"] = self._split_on_numerics
        if self._stem_english_possessive is not None:
            body["stem_english_possessive"] = self._stem_english_possessive
        if self._type_table:
            body["type_table"] = self._type_table
        if self._type_table_path:
            body["type_table_path"] = self._type_table_path
        return body


class WordDelimiterGraphTokenFilter(WordDelimiterTokenFilter):
    """
    Word delimiter graph token filter.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-word-delimiter-graph-tokenfilter.html

    Splits tokens at non-alphanumeric characters. The word_delimiter_graph filter also performs optional token
    normalization based on a set of rules. By default, the filter uses the following rules:
        1. Split tokens at non-alphanumeric characters. The filter uses these characters as delimiters. For example:
            Super-Duper → Super, Duper
        2. Remove leading or trailing delimiters from each token. For example: XL---42+'Autocoder' → XL, 42, Autocoder
        3. Split tokens at letter case transitions. For example: PowerShot → Power, Shot
        4. Split tokens at letter-number transitions. For example: XL500 → XL, 500
        5. Remove the English possessive ('s) from the end of each token. For example: Neil's → Neil
    The word_delimiter_graph filter uses Lucene’s WordDelimiterGraphFilter.

    :param adjust_offsets:
        (Optional, Boolean) If true, the filter adjusts the offsets of split or catenated tokens to better reflect
        their actual position in the token stream. Defaults to true.
    :param catenate_all:
        (Optional, Boolean) If true, the filter produces catenated tokens for chains of alphanumeric characters
        separated by non-alphabetic delimiters. For example: super-duper-xl-500 → [ super, superduperxl500, duper, xl,
        500 ]. Defaults to false.
    :param catenate_numbers:
        (Optional, Boolean) If true, the filter produces catenated tokens for chains of numeric characters separated
        by non-alphabetic delimiters. For example: 01-02-03 → [ 01, 010203, 02, 03 ]. Defaults to false.
    :param catenate_words:
        (Optional, Boolean) If true, the filter produces catenated tokens for chains of alphabetical characters
        separated by non-alphabetic delimiters. For example: super-duper-xl → [ super, superduperxl, duper, xl ].
        Defaults to false.
    :param generate_number_parts:
        (Optional, Boolean) If true, the filter includes tokens consisting of only numeric characters in the output.
        If false, the filter excludes these tokens from the output. Defaults to true.
    :param generate_word_parts:
        (Optional, Boolean) If true, the filter includes tokens consisting of only alphabetical characters in the
        output. If false, the filter excludes these tokens from the output. Defaults to true.
    :param ignore_keywords:
        (Optional, Boolean) If true, the filter skips tokens with a keyword attribute of true. Defaults to false.
    :param preserve_original:
        (Optional, Boolean) If true, the filter includes the original version of any split tokens in the output. This
        original version includes non-alphanumeric delimiters. For example: super-duper-xl-500 → [ super-duper-xl-500,
        super, duper, xl, 500 ]. Defaults to false.
    :param protected_words:
        (Optional, array of strings) Array of tokens the filter won’t split.
    :param protected_words_path:
        (Optional, string) Path to a file that contains a list of tokens the filter won’t split.
        This path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token
        in the file must be separated by a line break.
    :param split_on_case_change:
        (Optional, Boolean) If true, the filter splits tokens at letter case transitions. For example: camelCase →
        [ camel, Case ]. Defaults to true.
    :param split_on_numerics:
        (Optional, Boolean) If true, the filter splits tokens at letter-number transitions. For example: j2se →
        [ j, 2, se ]. Defaults to true.
    :param stem_english_possessive:
        (Optional, Boolean) If true, the filter removes the English possessive ('s) from the end of each token.
        For example: O'Neil's → [ O, Neil ]. Defaults to true.
    :param type_table:
        (Optional, array of strings) Array of custom type mappings for characters. This allows you to map
        non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.
        For example, the following array maps the plus (+) and hyphen (-) characters as alphanumeric, which means
        they won’t be treated as delimiters:
        [ "+ => ALPHA", "- => ALPHA" ]
    :param type_table_path:
        (Optional, string) Path to a file that contains custom type mappings for characters. This allows you to map
        non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.
    """
    # BUG FIX: this previously read "word_delimiter" (copied from the parent class),
    # which registers the plain word_delimiter filter instead of the graph variant.
    # Elasticsearch's type name for this filter is "word_delimiter_graph".
    type: str = "word_delimiter_graph"

    def __init__(
            self, name: str, adjust_offsets: bool = None, catenate_all: bool = None, catenate_numbers: bool = None,
            catenate_words: bool = None, generate_number_parts: bool = None, generate_word_parts: bool = None,
            ignore_keywords: bool = None, preserve_original: bool = None,
            protected_words: List[str] = None, protected_words_path: str = None, split_on_case_change: bool = None,
            split_on_numerics: bool = None, stem_english_possessive: bool = None,
            type_table: List[Literal["ALPHA", "ALPHANUM", "DIGIT", "LOWER", "SUBWORD_DELIM", "UPPER"]] = None,
            type_table_path: List[Literal["ALPHA", "ALPHANUM", "DIGIT", "LOWER", "SUBWORD_DELIM", "UPPER"]] = None,
            **kwargs
    ):
        # All parameters shared with word_delimiter are stored and serialized by
        # the parent class.
        super().__init__(
            name=name, catenate_all=catenate_all, catenate_numbers=catenate_numbers, catenate_words=catenate_words,
            generate_number_parts=generate_number_parts, generate_word_parts=generate_word_parts,
            preserve_original=preserve_original, protected_words=protected_words,
            protected_words_path=protected_words_path, split_on_case_change=split_on_case_change,
            split_on_numerics=split_on_numerics, stem_english_possessive=stem_english_possessive,
            type_table=type_table, type_table_path=type_table_path, **kwargs)
        # Parameters unique to the graph variant.
        self._adjust_offsets: bool = adjust_offsets
        self._ignore_keywords: bool = ignore_keywords
        return

    def _build(self) -> Dict:
        """
        Extend the parent's serialized body with the graph-only parameters.

        :return: Dict holding the filter's non-default settings.
        """
        body: Dict = super()._build()
        # "is not None" keeps an explicitly-set False in the request body.
        if self._adjust_offsets is not None:
            body["adjust_offsets"] = self._adjust_offsets
        if self._ignore_keywords is not None:
            body["ignore_keywords"] = self._ignore_keywords
        return body






