"""
@author: 江同学呀
@file: partial_word_tokenizer.py
@date: 2024/11/24 16:52
@desc: 
"""

from typing import Dict, List, Union

from espc.common.tokenizer_common import TokenCharacter
from espc.orm.model.text_analysis.tokenizer.base_tokenizer import BaseTokenizer


class NGramTokenizer(BaseTokenizer):
    """
    N-Gram tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-ngram-tokenizer.html

    The ngram tokenizer first breaks text down into words whenever it encounters
    one of a list of specified characters, then emits N-grams of each word of the
    specified length. N-grams work like a sliding window moving across the word —
    a continuous sequence of characters of the configured length. They are useful
    for querying languages that don't use spaces or that have long compound words,
    like German.

    :param min_gram:
        Minimum length of characters in a gram. Elasticsearch defaults to 1
        when omitted.
    :param max_gram:
        Maximum length of characters in a gram. Elasticsearch defaults to 2
        when omitted.
    :param token_chars:
        Character classes that should be included in a token; Elasticsearch
        splits on characters outside these classes. Defaults to [] (keep all
        characters). Items may be plain strings or ``TokenCharacter`` members.
    :param custom_token_chars:
        Custom characters treated as part of a token. For example, "+-_" makes
        the tokenizer treat plus, minus and underscore as token characters.
    """
    type: str = "ngram"

    def __init__(self, min_gram: int = None, max_gram: int = None, token_chars: List[Union[str, TokenCharacter]] = None,
                 custom_token_chars: str = None, **kwargs):
        super().__init__(**kwargs)
        # All settings are optional; None / empty means "use Elasticsearch defaults"
        # and the corresponding key is simply not emitted by _build().
        self._min_gram: int = min_gram
        self._max_gram: int = max_gram
        self._token_chars: List[Union[str, TokenCharacter]] = token_chars
        self._custom_token_chars: str = custom_token_chars

    def _build(self) -> Dict:
        """Extend the base tokenizer body with any explicitly-set ngram options."""
        settings: Dict = super()._build()
        # Numeric options: emit whenever explicitly set (0 is a legal value).
        for key, value in (("min_gram", self._min_gram), ("max_gram", self._max_gram)):
            if value is not None:
                settings[key] = value
        if self._token_chars:
            # Normalize TokenCharacter enum members to their raw string values.
            normalized = []
            for char in self._token_chars:
                normalized.append(char.value if isinstance(char, TokenCharacter) else char)
            settings["token_chars"] = normalized
        if self._custom_token_chars:
            settings["custom_token_chars"] = self._custom_token_chars
        return settings


class EdgeNGramTokenizer(BaseTokenizer):
    """
    Edge N-Gram tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-edgengram-tokenizer.html

    The edge_ngram tokenizer breaks text up into words when it encounters any of
    a list of specified characters (e.g. whitespace or punctuation), then returns
    n-grams of each word anchored to the start of the word, e.g.
    quick → [q, qu, qui, quic, quick].

    :param min_gram:
        Minimum length of characters in a gram. Elasticsearch defaults to 1
        when omitted.
    :param max_gram:
        Maximum length of characters in a gram. Elasticsearch defaults to 2
        when omitted.
    :param token_chars:
        Character classes that should be included in a token; Elasticsearch
        splits on characters outside these classes. Defaults to [] (keep all
        characters). Items may be plain strings or ``TokenCharacter`` members.
    :param custom_token_chars:
        Custom characters treated as part of a token. For example, "+-_" makes
        the tokenizer treat plus, minus and underscore as token characters.
    """
    type: str = "edge_ngram"

    def __init__(self, min_gram: int = None, max_gram: int = None, token_chars: List[Union[str, TokenCharacter]] = None,
                 custom_token_chars: str = None, **kwargs):
        super().__init__(**kwargs)
        # All settings are optional; None / empty means "use Elasticsearch defaults"
        # and the corresponding key is simply not emitted by _build().
        self._min_gram: int = min_gram
        self._max_gram: int = max_gram
        self._token_chars: List[Union[str, TokenCharacter]] = token_chars
        self._custom_token_chars: str = custom_token_chars

    def _build(self) -> Dict:
        """Extend the base tokenizer body with any explicitly-set edge_ngram options."""
        settings: Dict = super()._build()
        # Numeric options: emit whenever explicitly set (0 is a legal value).
        for key, value in (("min_gram", self._min_gram), ("max_gram", self._max_gram)):
            if value is not None:
                settings[key] = value
        if self._token_chars:
            # Normalize TokenCharacter enum members to their raw string values.
            normalized = []
            for char in self._token_chars:
                normalized.append(char.value if isinstance(char, TokenCharacter) else char)
            settings["token_chars"] = normalized
        if self._custom_token_chars:
            settings["custom_token_chars"] = self._custom_token_chars
        return settings




