"""
@author: 江同学呀
@file: analyzer.py
@date: 2024/11/24 16:27
@desc:
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-analyzers.html
"""
from typing import Dict, Union, Type, List

from espc.orm.model.base.base import _Base
from espc.orm.model.text_analysis.tokenizer.base_tokenizer import BaseTokenizer
from espc.orm.model.text_analysis.tokenizer.structured_text_tokenizer import KeywordTokenizer, PatternTokenizer
from espc.orm.model.text_analysis.tokenizer.word_oriented_tokenizer import LowercaseTokenizer, StandardTokenizer, \
    WhitespaceTokenizer


class BaseAnalyzer(_Base):
    """
    Analyzer definition for index settings.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-custom-analyzer.html

    :param name:
        The name under which the analyzer is registered in the index settings.
    :param tokenizer:
        A built-in or customised tokenizer. (Required)
    :param char_filter:
        An optional array of built-in or customised character filters.
    :param filter_:
        An optional array of built-in or customised token filters.
        (Trailing underscore avoids shadowing the ``filter`` builtin.)
    :param position_increment_gap:
        When indexing an array of text values, Elasticsearch inserts a fake
        "gap" between the last term of one value and the first term of the
        next value to ensure that a phrase query doesn't match two terms from
        different array elements. Defaults to 100.
    """
    # Analyzer type. Built-in subclasses override this ("standard", "simple",
    # ...). The empty-string default makes ``_build`` omit the "type" key,
    # which Elasticsearch treats as a custom analyzer. (Previously this was a
    # bare annotation, so reading ``self.type`` on a direct BaseAnalyzer
    # instance raised AttributeError.)
    type: str = ""
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]]

    def __init__(
            self, name: str, tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]],
            char_filter: List[str] = None, filter_: List[str] = None, position_increment_gap: int = None, **kwargs
    ):
        super().__init__(**kwargs)
        self.name: str = name
        self._tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = tokenizer
        self._char_filter: List[str] = char_filter
        self._filter: List[str] = filter_
        self._position_increment_gap: int = position_increment_gap

    def _build(self) -> Dict:
        """Serialize the analyzer into the dict body used in index settings."""
        body: Dict = {}
        if self.type:
            body["type"] = self.type
        if self._tokenizer:
            # A plain string names a built-in/registered tokenizer; otherwise
            # a BaseTokenizer instance or class contributes its ``type``.
            if isinstance(self._tokenizer, str):
                body["tokenizer"] = self._tokenizer
            else:
                body["tokenizer"] = self._tokenizer.type
        if self._char_filter:
            body["char_filter"] = self._char_filter
        if self._filter:
            body["filter"] = self._filter
        if self._position_increment_gap is not None:
            body["position_increment_gap"] = self._position_increment_gap
        return body


class StandardAnalyzer(BaseAnalyzer):
    """
    Standard analyzer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-standard-analyzer.html

    The default analyzer used when none is specified. Provides grammar-based
    tokenization (Unicode Text Segmentation algorithm, Unicode Standard
    Annex #29) and works well for most languages.

    :param max_token_length:
        Tokens longer than this are split at max_token_length intervals.
        Defaults to 255.
    :param stopwords:
        A pre-defined stop-words list such as ``_english_`` or an array of
        stop words. Defaults to ``_none_``.
    :param stopwords_path:
        The path to a file containing stop words.
    """
    type: str = "standard"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = StandardTokenizer

    def __init__(
            self, name: str, max_token_length: int = None, stopwords: str = None, stopwords_path: str = None,
            char_filter: List[str] = None, filter_: List[str] = None, position_increment_gap: int = None, **kwargs
    ):
        super().__init__(
            name=name, tokenizer=StandardTokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )
        self._max_token_length: int = max_token_length
        self._stopwords: str = stopwords
        self._stopwords_path: str = stopwords_path

    def _build(self) -> Dict:
        spec: Dict = super()._build()
        # 0 is not a meaningful max_token_length in ES, but stay explicit.
        if self._max_token_length is not None:
            spec["max_token_length"] = self._max_token_length
        for key, value in (("stopwords", self._stopwords), ("stopwords_path", self._stopwords_path)):
            if value:
                spec[key] = value
        return spec


class SimpleAnalyzer(BaseAnalyzer):
    """
    Simple analyzer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-simple-analyzer.html

    Breaks text into tokens at any non-letter character (numbers, spaces,
    hyphens, apostrophes), discards non-letter characters, and lowercases
    the result.
    """
    type: str = "simple"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = LowercaseTokenizer

    def __init__(
            self, name: str, char_filter: List[str] = None, filter_: List[str] = None,
            position_increment_gap: int = None, **kwargs
    ):
        super().__init__(
            name=name, tokenizer=LowercaseTokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )

    def _build(self) -> Dict:
        # No type-specific options beyond the common analyzer ones.
        return super()._build()


class WhitespaceAnalyzer(BaseAnalyzer):
    """
    Whitespace analyzer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-whitespace-analyzer.html

    Breaks text into terms whenever it encounters a whitespace character.
    """
    type: str = "whitespace"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = WhitespaceTokenizer

    def __init__(
            self, name: str, char_filter: List[str] = None, filter_: List[str] = None,
            position_increment_gap: int = None, **kwargs
    ):
        super().__init__(
            name=name, tokenizer=WhitespaceTokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )

    def _build(self) -> Dict:
        # No type-specific options beyond the common analyzer ones.
        return super()._build()


class StopAnalyzer(BaseAnalyzer):
    """
    Stop analyzer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-stop-analyzer.html

    Same as the simple analyzer but adds support for removing stop words.
    Defaults to the ``_english_`` stop words.

    :param stopwords:
        A pre-defined stop-words list such as ``_english_`` or an array of
        stop words. Defaults to ``_none_``.
    :param stopwords_path:
        The path to a stop-words file, relative to the Elasticsearch config
        directory.
    """
    type: str = "stop"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = LowercaseTokenizer

    def __init__(
            self, name: str, stopwords: str = None, stopwords_path: str = None, char_filter: List[str] = None,
            filter_: List[str] = None, position_increment_gap: int = None, **kwargs
    ):
        super().__init__(
            name=name, tokenizer=LowercaseTokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )
        self._stopwords: str = stopwords
        self._stopwords_path: str = stopwords_path

    def _build(self) -> Dict:
        spec: Dict = super()._build()
        optional = {"stopwords": self._stopwords, "stopwords_path": self._stopwords_path}
        spec.update({key: value for key, value in optional.items() if value})
        return spec


class KeywordAnalyzer(BaseAnalyzer):
    """
    Keyword analyzer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-keyword-analyzer.html

    A "noop" analyzer which returns the entire input string as a single token.
    """
    type: str = "keyword"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = KeywordTokenizer

    def __init__(
            self, name: str, char_filter: List[str] = None, filter_: List[str] = None,
            position_increment_gap: int = None, **kwargs
    ):
        super().__init__(
            name=name, tokenizer=KeywordTokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )

    def _build(self) -> Dict:
        # No type-specific options beyond the common analyzer ones.
        return super()._build()


class PatternAnalyzer(BaseAnalyzer):
    r"""
    形态分析器
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-pattern-analyzer.html

    The pattern analyzer uses a regular expression to split the text into terms. The regular expression should match
    the token separators not the tokens themselves. The regular expression defaults to \W+ (or all non-word characters).
    模式分析器使用正则表达式将文本拆分为术语。正则表达式应匹配标记分隔符，而不是标记本身。正则表达式默认为 \W+ （或所有非单词字符）。

    :param pattern:
        A Java regular expression, defaults to \W+.
        Java 正则表达式，默认为 \W+。
    :param flags:
        Java regular expression flags. Flags should be pipe-separated, eg "CASE_INSENSITIVE|COMMENTS".
        Java 正则表达式标志。标志应以管道分隔，例如 “CASE_INSENSITIVE|评论”。
    :param lowercase:
        Should terms be lowercased or not. Defaults to true.
        术语应该小写`还是不小写`。默认为 true。
    :param stopwords:
        A pre-defined stop words list like _english_ or an array containing a list of stop words. Defaults to _none_.
        预定义的停用词列表（如 _english_）或包含停用词列表的数组。默认为 _none_。
    :param stopwords_path:
        The path to a file containing stop words. This path is relative to the Elasticsearch config directory.
        包含停用词的文件的路径。此路径是相对于 Elasticsearch 配置目录的。
    """
    type: str = "pattern"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = PatternTokenizer

    def __init__(
            self, name: str, pattern: str = None, flags: str = None, lowercase: bool = None, stopwords: str = None,
            stopwords_path: str = None, char_filter: List[str] = None, filter_: List[str] = None,
            position_increment_gap: int = None, **kwargs
    ):
        super().__init__(
            name=name, tokenizer=PatternTokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )
        self._pattern: str = pattern
        self._flags: str = flags
        self._lowercase: bool = lowercase
        self._stopwords: str = stopwords
        self._stopwords_path: str = stopwords_path
        return

    def _build(self) -> Dict:
        body: Dict = super()._build()
        if self._pattern:
            body["pattern"] = self._pattern
        if self._flags:
            body["flags"] = self._flags
        if self._lowercase is not None:
            body["lowercase"] = self._lowercase
        if self._stopwords:
            body["stopwords"] = self._stopwords
        if self._stopwords_path:
            body["stopwords_path"] = self._stopwords_path
        return body


class LanguageAnalyzer(BaseAnalyzer):
    """
    Language analyzer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-lang-analyzer.html

    A set of analyzers aimed at analyzing specific language text. The
    following types are supported: arabic, armenian, basque, bengali,
    brazilian, bulgarian, catalan, cjk, czech, danish, dutch, english,
    estonian, finnish, french, galician, german, greek, hindi, hungarian,
    indonesian, irish, italian, latvian, lithuanian, norwegian, persian,
    portuguese, romanian, russian, sorani, spanish, swedish, turkish, thai.

    :param language:
        Which built-in language analyzer to use. This value becomes the
        analyzer's ``type`` in the settings body (e.g. ``"english"``,
        ``"french"``). Defaults to ``"english"``.
    """
    # For a language analyzer the "type" IS the language name; "english" is
    # only the class-level default. (The previous hard-coded "pattern" was a
    # copy/paste error from PatternAnalyzer and produced the wrong analyzer.)
    type: str = "english"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = StandardTokenizer

    def __init__(
            self, name: str, tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = StandardTokenizer,
            char_filter: List[str] = None, filter_: List[str] = None, position_increment_gap: int = None,
            language: str = "english", **kwargs
    ):
        super().__init__(
            name=name, tokenizer=tokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )
        # Instance attribute shadows the class default so each instance can
        # target its own language.
        self.type: str = language

    def _build(self) -> Dict:
        return super()._build()


class FingerprintAnalyzer(BaseAnalyzer):
    """
    Fingerprint analyzer.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-fingerprint-analyzer.html

    Implements the fingerprinting algorithm used by the OpenRefine project to
    assist in clustering: input text is lowercased, normalized to remove
    extended characters, sorted, deduplicated and concatenated into a single
    token. If a stop-word list is configured, stop words are removed as well.

    :param separator:
        The character used to concatenate the terms. Defaults to a space.
        An explicit empty string is valid (concatenate without a separator).
    :param max_output_size:
        The maximum token size to emit. Defaults to 255; larger tokens are
        discarded.
    :param stopwords:
        A pre-defined stop-words list such as ``_english_`` or an array of
        stop words. Defaults to ``_none_``.
    :param stopwords_path:
        The path to a stop-words file, relative to the Elasticsearch config
        directory.
    """
    type: str = "fingerprint"
    tokenizer: Union[str, BaseTokenizer, Type[BaseTokenizer]] = StandardTokenizer

    def __init__(
            self, name: str, separator: str = None, max_output_size: int = None, stopwords: str = None,
            stopwords_path: str = None, char_filter: List[str] = None, filter_: List[str] = None,
            position_increment_gap: int = None, **kwargs
    ):
        super().__init__(
            name=name, tokenizer=StandardTokenizer, char_filter=char_filter, filter_=filter_,
            position_increment_gap=position_increment_gap, **kwargs
        )
        self._separator: str = separator
        self._max_output_size: int = max_output_size
        self._stopwords: str = stopwords
        self._stopwords_path: str = stopwords_path

    def _build(self) -> Dict:
        body: Dict = super()._build()
        # Compare against None (not truthiness) so an explicit empty-string
        # separator is still sent to Elasticsearch instead of being silently
        # dropped (which would fall back to the default space).
        if self._separator is not None:
            body["separator"] = self._separator
        if self._max_output_size is not None:
            body["max_output_size"] = self._max_output_size
        if self._stopwords:
            body["stopwords"] = self._stopwords
        if self._stopwords_path:
            body["stopwords_path"] = self._stopwords_path
        return body











