"""
@author: 江同学呀
@file: normalizer.py
@date: 2024/11/25 21:44
@desc:
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-normalizers.html
"""
from typing import List, Union, Dict

from espc.orm.model.base.base import _Base
from espc.orm.model.text_analysis.character_filter import _BaseCharacterFilter
from espc.orm.model.text_analysis.token_filter import TokenFilter


class Normalizer(_Base):
    """
    Elasticsearch normalizer definition.

    Normalizers are similar to analyzers except that they may only emit a
    single token. As a consequence, they do not have a tokenizer and only
    accept a subset of the available char filters and token filters — only
    filters that work on a per-character basis are allowed (e.g. ``lowercase``
    is allowed, but a stemming filter is not). Elasticsearch ships with a
    ``lowercase`` built-in normalizer; any other configuration must be a
    custom normalizer, which is what this class models.

    See:
        https://www.elastic.co/guide/en/elasticsearch/reference/7.17/analysis-normalizers.html
    """

    def __init__(
            self, name: str, char_filter: List[Union[str, _BaseCharacterFilter]],
            filter_: List[Union[str, TokenFilter]], type_: str = "custom", *args, **kwargs
    ):
        """
        :param name: normalizer name; used as the key under
            ``settings.analysis.normalizer`` in the index settings.
        :param char_filter: character filters to apply, either by name (str)
            or as ``_BaseCharacterFilter`` instances.
        :param filter_: token filters to apply, either by name (str) or as
            ``TokenFilter`` instances. Trailing underscore avoids shadowing
            the ``filter`` builtin.
        :param type_: normalizer type; Elasticsearch currently only supports
            ``"custom"`` here. Trailing underscore avoids shadowing the
            ``type`` builtin.
        :param args: forwarded to ``_Base``.
        :param kwargs: forwarded to ``_Base``.
        """
        super().__init__(*args, **kwargs)
        self.name: str = name
        self._type: str = type_
        self._char_filter: List[Union[str, _BaseCharacterFilter]] = char_filter
        self._filter: List[Union[str, TokenFilter]] = filter_

    def _build(self) -> Dict:
        """
        Serialize this normalizer into the dict expected under
        ``settings.analysis.normalizer.<name>`` in the index settings.
        """
        # NOTE(review): the previous char_filter mapping was a no-op ternary
        # (`_cf if isinstance(_cf, _BaseCharacterFilter) else _cf` — both
        # branches identical). It presumably meant to extract a name/type from
        # _BaseCharacterFilter instances, mirroring the token-filter branch
        # below — confirm against _BaseCharacterFilter before changing.
        # Behavior is preserved here: a shallow copy of the list as-is.
        return {
            "type": self._type,
            "char_filter": list(self._char_filter),
            "filter": [
                _f.type if isinstance(_f, TokenFilter) else _f
                for _f in self._filter
            ],
        }
