"""
@author: 江同学呀
@file: structured_text_tokenizer.py
@date: 2024/11/24 17:10
@desc: 
"""
from typing import Dict, Union, List

from espc.common.tokenizer_common import TokenCharacter
from espc.orm.model.text_analysis.tokenizer.base_tokenizer import BaseTokenizer


class KeywordTokenizer(BaseTokenizer):
    """
    Keyword tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-keyword-tokenizer.html

    The keyword tokenizer is a "noop" tokenizer that accepts whatever text it
    is given and outputs the exact same text as a single term. It can be
    combined with token filters to normalise output, e.g. lower-casing email
    addresses.

    :param buffer_size:
        The number of characters read into the term buffer in a single pass.
        Defaults to 256. The term buffer will grow by this size until all the
        text has been consumed. It is advisable not to change this setting.
    """
    type: str = "keyword"

    def __init__(self, buffer_size: int = None, **kwargs):
        super().__init__(**kwargs)
        # None means "use the Elasticsearch default"; the key is then omitted
        # from the request body built in _build().
        self._buffer_size: int = buffer_size
        return

    def _build(self) -> Dict:
        # Start from the base body and only add explicitly-configured options.
        body: Dict = super()._build()
        buffer_size = self._buffer_size
        if buffer_size is not None:
            body["buffer_size"] = buffer_size
        return body


class PatternTokenizer(BaseTokenizer):
    r"""
    Pattern tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-pattern-tokenizer.html

    The pattern tokenizer uses a regular expression to either split text into
    terms whenever it matches a word separator, or to capture matching text as
    terms. The default pattern is \W+, which splits text whenever it
    encounters non-word characters.

    :param pattern:
        A Java regular expression, defaults to \W+.
    :param flags:
        Java regular expression flags. Flags should be pipe-separated,
        e.g. "CASE_INSENSITIVE|COMMENTS".
    :param group:
        Which capture group to extract as tokens. Defaults to -1 (split).
    """
    type: str = "pattern"

    def __init__(self, pattern: str = None, flags: str = None, group: int = None, **kwargs):
        super().__init__(**kwargs)
        # Unset (None) options are omitted from the body so Elasticsearch
        # applies its own defaults.
        self._pattern: str = pattern
        self._flags: str = flags
        self._group: int = group
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # String options are only emitted when non-empty.
        body.update({
            key: value
            for key, value in (("pattern", self._pattern), ("flags", self._flags))
            if value
        })
        # group may legitimately be 0 or -1, so test against None explicitly.
        if self._group is not None:
            body["group"] = self._group
        return body


class SimplePatternTokenizer(BaseTokenizer):
    """
    Simple pattern tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-simplepattern-tokenizer.html

    The simple_pattern tokenizer uses a regular expression to capture matching
    text as terms. The set of regular expression features it supports is more
    limited than the pattern tokenizer, but the tokenization is generally
    faster.

    This tokenizer does not support splitting the input on a pattern match,
    unlike the pattern tokenizer. To split on pattern matches using the same
    restricted regular expression subset, see the simple_pattern_split
    tokenizer.

    This tokenizer uses Lucene regular expressions. For an explanation of the
    supported features and syntax, see Regular Expression Syntax.

    The default pattern is the empty string, which produces no terms. This
    tokenizer should always be configured with a non-default pattern.

    :param pattern:
        Lucene regular expression, defaults to the empty string.
    """
    type: str = "simple_pattern"

    def __init__(self, pattern: str = None, **kwargs):
        super().__init__(**kwargs)
        # Kept as-is; only a non-empty pattern is serialized into the body.
        self._pattern: str = pattern
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        pattern = self._pattern
        if pattern:
            body["pattern"] = pattern
        return body


class CharacterGroupTokenizer(BaseTokenizer):
    """
    Character group tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-chargroup-tokenizer.html

    The char_group tokenizer breaks text into terms whenever it encounters a
    character which is in a defined set. It is mostly useful for cases where a
    simple custom tokenization is desired, and the overhead of use of the
    pattern tokenizer is not acceptable.

    :param tokenize_on_chars:
        A list containing a list of characters to tokenize the string on.
        Whenever a character from this list is encountered, a new token is
        started. This accepts either single characters like e.g. -, or
        character groups: whitespace, letter, digit, punctuation, symbol.
    :param max_token_length:
        The maximum token length. If a token is seen that exceeds this length
        then it is split at max_token_length intervals. Defaults to 255.
    """
    type: str = "char_group"

    def __init__(
            self, tokenize_on_chars: List[Union[str, TokenCharacter]] = None, max_token_length: int = None, **kwargs
    ):
        super().__init__(**kwargs)
        # Entries may be plain strings or TokenCharacter enum members; the
        # latter are unwrapped to their values when the body is built.
        self._tokenize_on_chars: List[Union[str, TokenCharacter]] = tokenize_on_chars
        self._max_token_length: int = max_token_length
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        chars = self._tokenize_on_chars
        if chars:
            serialized = []
            for char in chars:
                # TokenCharacter members serialize as their underlying value.
                serialized.append(char.value if isinstance(char, TokenCharacter) else char)
            body["tokenize_on_chars"] = serialized
        if self._max_token_length is not None:
            body["max_token_length"] = self._max_token_length
        return body


class SimplePatternSplitTokenizer(BaseTokenizer):
    """
    Simple pattern split tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-simplepatternsplit-tokenizer.html

    The simple_pattern_split tokenizer uses a regular expression to split the
    input into terms at pattern matches. The set of regular expression
    features it supports is more limited than the pattern tokenizer, but the
    tokenization is generally faster.

    This tokenizer does not produce terms from the matches themselves. To
    produce terms from matches using patterns in the same restricted regular
    expression subset, see the simple_pattern tokenizer.

    This tokenizer uses Lucene regular expressions. For an explanation of the
    supported features and syntax, see Regular Expression Syntax.

    The default pattern is the empty string, which produces one term
    containing the full input. This tokenizer should always be configured
    with a non-default pattern.

    :param pattern:
        Lucene regular expression, defaults to the empty string.
    """
    type: str = "simple_pattern_split"

    def __init__(self, pattern: str = None, **kwargs):
        super().__init__(**kwargs)
        # Only a non-empty pattern is emitted; Elasticsearch otherwise uses
        # its (empty-string) default.
        self._pattern: str = pattern
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        return {**body, "pattern": self._pattern} if self._pattern else body


class PathHierarchyTokenizer(BaseTokenizer):
    """
    Path hierarchy tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-pathhierarchy-tokenizer.html

    The path_hierarchy tokenizer takes a hierarchical value like a filesystem
    path, splits on the path separator, and emits a term for each component in
    the tree.

    :param delimiter:
        The character to use as the path separator. Defaults to /.
    :param replacement:
        An optional replacement character to use for the delimiter. Defaults
        to the delimiter.
    :param buffer_size:
        The number of characters read into the term buffer in a single pass.
        Defaults to 1024. The term buffer will grow by this size until all the
        text has been consumed. It is advisable not to change this setting.
    :param reverse:
        If set to True, emits the tokens in reverse order. Defaults to False.
    :param skip:
        The number of initial tokens to skip. Defaults to 0.
    """
    type: str = "path_hierarchy"

    def __init__(self, delimiter: str = None, replacement: str = None, buffer_size: int = None,
                 reverse: bool = None, skip: int = None, **kwargs):
        super().__init__(**kwargs)
        self._delimiter: str = delimiter
        self._replacement: str = replacement
        self._buffer_size: int = buffer_size
        # Fix: `reverse` is a boolean option in Elasticsearch, not a string;
        # annotated accordingly. None still means "use the server default".
        self._reverse: bool = reverse
        self._skip: int = skip
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._delimiter:
            body["delimiter"] = self._delimiter
        if self._replacement:
            body["replacement"] = self._replacement
        if self._buffer_size is not None:
            body["buffer_size"] = self._buffer_size
        # Fix: check against None instead of truthiness so an explicit
        # reverse=False is serialized rather than silently dropped (it
        # previously only worked because False happens to be the ES default).
        if self._reverse is not None:
            body["reverse"] = self._reverse
        if self._skip is not None:
            body["skip"] = self._skip
        return body


# Convenience alias: PathTokenizer refers to the same class as
# PathHierarchyTokenizer.
PathTokenizer = PathHierarchyTokenizer



















