"""
@author: 江同学呀
@file: word_oriented_tokenizer.py
@date: 2024/11/24 16:44
@desc: 
"""
from typing import Dict, Optional

from espc.orm.model.text_analysis.tokenizer.base_tokenizer import BaseTokenizer


class StandardTokenizer(BaseTokenizer):
    """
    Standard tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-standard-tokenizer.html

    The standard tokenizer provides grammar based tokenization (based on the Unicode Text Segmentation algorithm, as
    specified in Unicode Standard Annex #29) and works well for most languages.

    :param max_token_length:
        The maximum token length. If a token is seen that exceeds this length then it is split at max_token_length
        intervals. Elasticsearch defaults to 255; passing ``None`` omits the option so the server default applies.
    """
    type: str = "standard"

    def __init__(self, max_token_length: Optional[int] = None, **kwargs):
        super().__init__(**kwargs)
        # None means "do not send the option"; Elasticsearch then uses its own default (255).
        self._max_token_length: Optional[int] = max_token_length

    def _build(self) -> Dict:
        """Build the tokenizer definition body, adding max_token_length only when explicitly set."""
        body: Dict = super()._build()
        if self._max_token_length is not None:
            body["max_token_length"] = self._max_token_length
        return body


class LetterTokenizer(BaseTokenizer):
    """
    Letter tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-letter-tokenizer.html

    The letter tokenizer breaks text into terms whenever it encounters a character which is not a letter. It does a
    reasonable job for most European languages, but does a terrible job for some Asian languages, where words are not
    separated by spaces.
    """
    type: str = "letter"

    def __init__(self, **kwargs):
        # No tokenizer-specific options; all configuration is handled by the base class.
        super().__init__(**kwargs)

    def _build(self) -> Dict:
        # Nothing to add beyond the base body.
        return super()._build()


class LowercaseTokenizer(BaseTokenizer):
    """
    Lowercase tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-lowercase-tokenizer.html

    The lowercase tokenizer, like the letter tokenizer breaks text into terms whenever it encounters a character which
    is not a letter, but it also lowercases all terms. It is functionally equivalent to the letter tokenizer combined
    with the lowercase token filter, but is more efficient as it performs both steps in a single pass.
    """
    type: str = "lowercase"

    def __init__(self, **kwargs):
        # No tokenizer-specific options; all configuration is handled by the base class.
        super().__init__(**kwargs)

    def _build(self) -> Dict:
        # Nothing to add beyond the base body.
        return super()._build()


class WhitespaceTokenizer(BaseTokenizer):
    """
    Whitespace tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-whitespace-tokenizer.html

    The whitespace tokenizer breaks text into terms whenever it encounters a whitespace character.

    :param max_token_length:
        The maximum token length. If a token is seen that exceeds this length then it is split at max_token_length
        intervals. Elasticsearch defaults to 255; passing ``None`` omits the option so the server default applies.
    """
    type: str = "whitespace"

    def __init__(self, max_token_length: Optional[int] = None, **kwargs):
        super().__init__(**kwargs)
        # None means "do not send the option"; Elasticsearch then uses its own default (255).
        self._max_token_length: Optional[int] = max_token_length

    def _build(self) -> Dict:
        """Build the tokenizer definition body, adding max_token_length only when explicitly set."""
        body: Dict = super()._build()
        if self._max_token_length is not None:
            body["max_token_length"] = self._max_token_length
        return body


class UAXURLEmailTokenizer(BaseTokenizer):
    """
    UAX URL email tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-uaxurlemail-tokenizer.html

    The uax_url_email tokenizer is like the standard tokenizer except that it recognises URLs and email addresses as
    single tokens.

    :param max_token_length:
        The maximum token length. If a token is seen that exceeds this length then it is split at max_token_length
        intervals. Elasticsearch defaults to 255; passing ``None`` omits the option so the server default applies.
    """
    type: str = "uax_url_email"

    def __init__(self, max_token_length: Optional[int] = None, **kwargs):
        super().__init__(**kwargs)
        # None means "do not send the option"; Elasticsearch then uses its own default (255).
        self._max_token_length: Optional[int] = max_token_length

    def _build(self) -> Dict:
        """Build the tokenizer definition body, adding max_token_length only when explicitly set."""
        body: Dict = super()._build()
        if self._max_token_length is not None:
            body["max_token_length"] = self._max_token_length
        return body


class ClassicTokenizer(BaseTokenizer):
    """
    Classic tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-classic-tokenizer.html

    The classic tokenizer is a grammar based tokenizer that is good for English language documents. This tokenizer has
    heuristics for special treatment of acronyms, company names, email addresses, and internet host names. However,
    these rules don't always work, and the tokenizer doesn't work well for most languages other than English.
    (NOTE: the original docstring here was a copy-paste of the lowercase tokenizer description; replaced with the
    description from the linked Elasticsearch reference page.)

    :param max_token_length:
        The maximum token length. If a token is seen that exceeds this length then it is split at max_token_length
        intervals. Elasticsearch defaults to 255; passing ``None`` omits the option so the server default applies.
    """
    type: str = "classic"

    def __init__(self, max_token_length: Optional[int] = None, **kwargs):
        super().__init__(**kwargs)
        # None means "do not send the option"; Elasticsearch then uses its own default (255).
        self._max_token_length: Optional[int] = max_token_length

    def _build(self) -> Dict:
        """Build the tokenizer definition body, adding max_token_length only when explicitly set."""
        body: Dict = super()._build()
        if self._max_token_length is not None:
            body["max_token_length"] = self._max_token_length
        return body


class ThaiTokenizer(BaseTokenizer):
    """
    Thai tokenizer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-thai-tokenizer.html

    The thai tokenizer segments Thai text into words, using the Thai segmentation algorithm included with Java. Text in
    other languages in general will be treated the same as the standard tokenizer.
    """
    type: str = "thai"

    def __init__(self, **kwargs):
        # No tokenizer-specific options; all configuration is handled by the base class.
        super().__init__(**kwargs)

    def _build(self) -> Dict:
        # Nothing to add beyond the base body.
        return super()._build()




