"""
@author: 江同学呀
@file: highlighting.py
@date: 2025/1/8 21:34
@desc:
    突出

    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/highlighting.html

    todo 增加高亮类和方法
"""
from typing import Union, Literal, List, Dict

from espc.common.highlight_common import BoundaryScanner, Fragmenter, HighlightOrder, HighlightType
from espc.orm.model.base.base import _Base
from espc.orm.model.mapping.field.base_field.base_field import _BaseField


class Highlight(_Base):
    """
    Search-request highlighting options.

    Builds the ``highlight`` section of an Elasticsearch search body.
    https://www.elastic.co/guide/en/elasticsearch/reference/7.17/highlighting.html

    :param boundary_chars:
        A string containing each boundary character. Defaults to ``.,!? \\t\\n``.
    :param boundary_max_scan:
        How far to scan for boundary characters. Defaults to 20.
    :param boundary_max_scan_length:
        Extra boundary-scan length option passed through verbatim.
        NOTE(review): not a documented 7.17 highlight parameter — confirm against the cluster version.
    :param boundary_scanner:
        How to break highlighted fragments: ``chars``, ``sentence`` or ``word``. Only valid for the
        ``unified`` and ``fvh`` highlighters. Defaults to ``sentence`` for ``unified`` and ``chars``
        for ``fvh``.
            - chars: use the characters in ``boundary_chars`` as boundaries; ``boundary_max_scan``
              controls how far to scan. Only valid for the ``fvh`` highlighter.
            - sentence: break fragments at the next sentence boundary (Java ``BreakIterator``);
              the locale comes from ``boundary_scanner_locale``. With the ``unified`` highlighter,
              sentences bigger than ``fragment_size`` are split at the first word boundary next to
              ``fragment_size``; set ``fragment_size`` to 0 to never split a sentence.
            - word: break fragments at the next word boundary (Java ``BreakIterator``); the locale
              comes from ``boundary_scanner_locale``.
    :param boundary_scanner_locale:
        Locale used to search for sentence and word boundaries, as a language tag such as
        ``"en-US"``, ``"fr-FR"``, ``"ja-JP"``. Defaults to ``Locale.ROOT``.
    :param encoder:
        Whether the snippet should be HTML encoded: ``default`` (no encoding) or ``html``
        (HTML-escape the snippet text, then insert the highlighting tags).
    :param fields:
        The fields to retrieve highlights for, mapping a field name (or field object) to a nested
        ``Highlight``, a raw options dict, or ``None`` for the field's default highlight settings.
        Wildcards are allowed (e.g. ``comment_*``); with wildcards only ``text``,
        ``match_only_text`` and ``keyword`` fields are highlighted. If you use a custom mapper and
        still want highlighting on a field, name that field explicitly.
    :param force_source:
        Highlight based on the ``_source`` even if the field is stored separately.
        Defaults to ``false``.
    :param highlight_query:
        Highlight matches for a query other than the search query; useful with rescore queries,
        which highlighting ignores by default. Elasticsearch does not validate that
        ``highlight_query`` contains the search query in any way, so include the search query as
        part of it or legitimate results may not be highlighted.
    :param matched_fields:
        Combine matches on multiple fields to highlight a single field; most intuitive for
        multi-fields analyzing the same string differently. All ``matched_fields`` must have
        ``term_vector`` set to ``with_positions_offsets``, but only the field the matches are
        combined into is loaded, so only that field benefits from ``store: yes``. Only valid for
        the ``fvh`` highlighter.
    :param no_match_size:
        Amount of text to return from the beginning of the field when there is nothing to
        highlight. Defaults to 0 (nothing is returned).
    :param number_of_fragments:
        Maximum number of fragments to return. If set to 0 no fragments are returned; instead the
        entire field content is highlighted and returned (handy for short texts such as titles or
        addresses), and ``fragment_size`` is ignored. Defaults to 5.
    :param boundary_scanner_query:
        Boundary-scanner query option passed through verbatim.
        NOTE(review): not a documented 7.17 highlight parameter — confirm before relying on it.
    :param boundary_scanner_type:
        Boundary-scanner type option passed through verbatim.
        NOTE(review): not a documented 7.17 highlight parameter — confirm before relying on it.
    :param fragmenter:
        How text is broken up in highlight snippets: ``simple`` or ``span``. Only valid for the
        ``plain`` highlighter. Defaults to ``span``.
            - simple: break text into same-sized fragments.
            - span: break text into same-sized fragments, but try not to split between highlighted
              terms — helpful when querying for phrases. This is the default.
    :param fragment_offset:
        Margin from which to start highlighting. Only valid with the ``fvh`` highlighter.
    :param fragment_size:
        Size of the highlighted fragment in characters. Defaults to 100.
    :param order:
        ``score`` sorts highlighted fragments by relevance (most relevant first); by default
        fragments are output in the order they appear in the field (``none``). Each highlighter
        applies its own logic to compute relevancy scores.
    :param phrase_limit:
        Number of matching phrases in a document that are considered; prevents the ``fvh``
        highlighter from analyzing too many phrases and consuming too much memory. With
        ``matched_fields``, ``phrase_limit`` phrases per matched field are considered. Raising the
        limit increases query time and memory use. Only supported by ``fvh``. Defaults to 256.
    :param pre_tags:
        Used with ``post_tags`` to define the HTML tags wrapped around highlighted text. By
        default highlighted text is wrapped in ``<em>`` and ``</em>``. A single string is
        accepted and wrapped into a one-element list.
    :param post_tags:
        Used with ``pre_tags``; see above. A single string is accepted and wrapped into a
        one-element list.
    :param require_field_match:
        By default only fields containing a query match are highlighted. Set to ``false`` to
        highlight all fields. Defaults to ``true``.
    :param max_analyzed_offset:
        By default the number of characters analyzed per highlight request is bounded by the
        ``index.highlight.max_analyzed_offset`` index setting; exceeding it returns an error. If
        this request-level setting is non-negative, highlighting stops at this limit and the rest
        of the text is left unprocessed (no error). It does not override the index setting when
        the index setting is lower.
    :param tags_schema:
        Set to ``styled`` to use the built-in tag schema, which defines a set of ``pre_tags`` and
        defines ``post_tags`` as ``</em>``.
    :param type_:
        The highlighter to use: ``unified``, ``plain`` or ``fvh``. Defaults to ``unified``.
        (Named ``type_`` to avoid shadowing the ``type`` builtin; serialized as ``type``.)
    """

    def __init__(
            self, boundary_chars: str = None, boundary_max_scan: int = None, boundary_max_scan_length: int = None,
            boundary_scanner: Union[BoundaryScanner, Literal["chars", "sentence", "word"]] = None,
            boundary_scanner_locale: str = None, encoder: str = None,
            fields: Dict[Union[str, _BaseField], Union["Highlight", Dict, None]] = None, force_source: bool = None,
            highlight_query: Union[str, Dict] = None, matched_fields: List[str] = None, no_match_size: int = None,
            number_of_fragments: int = None, boundary_scanner_query: str = None, boundary_scanner_type: str = None,
            fragmenter: Union[Fragmenter, Literal["simple", "span"]] = None, fragment_offset: int = None,
            fragment_size: int = None, order: Union[HighlightOrder, Literal["none", "score"]] = None,
            phrase_limit: int = None, pre_tags: Union[str, List[str]] = None, post_tags: Union[str, List[str]] = None,
            require_field_match: bool = None, max_analyzed_offset: int = None, tags_schema: str = None,
            type_: Union[HighlightType, Literal["fvh", "plain", "unified"]] = None
    ):
        super().__init__()
        self._boundary_chars: str = boundary_chars
        self._boundary_max_scan: int = boundary_max_scan
        self._boundary_max_scan_length: int = boundary_max_scan_length
        self._boundary_scanner: Union[BoundaryScanner, Literal["chars", "sentence", "word"], None] = boundary_scanner
        self._boundary_scanner_locale: str = boundary_scanner_locale
        self._encoder: str = encoder
        self._fields: Dict[Union[str, _BaseField], Union["Highlight", Dict, None]] = fields
        self._force_source: bool = force_source
        self._highlight_query: Union[str, Dict, None] = highlight_query
        self._matched_fields: List[str] = matched_fields
        self._no_match_size: int = no_match_size
        self._number_of_fragments: int = number_of_fragments
        self._boundary_scanner_query: str = boundary_scanner_query
        self._boundary_scanner_type: str = boundary_scanner_type
        self._fragmenter: Union[Fragmenter, Literal["simple", "span"], None] = fragmenter
        self._fragment_offset: int = fragment_offset
        self._fragment_size: int = fragment_size
        self._order: Union[HighlightOrder, Literal["none", "score"], None] = order
        self._phrase_limit: int = phrase_limit
        self._pre_tags: Union[str, List[str]] = pre_tags
        self._post_tags: Union[str, List[str]] = post_tags
        self._require_field_match: bool = require_field_match
        self._max_analyzed_offset: int = max_analyzed_offset
        self._tags_schema: str = tags_schema
        self._type_: Union[HighlightType, Literal["fvh", "plain", "unified"], None] = type_

    @staticmethod
    def _enum_or_str(value) -> str:
        """Serialize an enum member to its value; pass plain strings through."""
        return value if isinstance(value, str) else value.value

    def _build_fields(self) -> Dict:
        """Serialize the per-field highlight settings.

        A value of ``None`` means "default settings" and serializes to ``{}``;
        raw dicts pass through unchanged; nested ``Highlight`` objects are built.
        """
        _built: Dict = {}
        for _field, _options in self._fields.items():
            _name = _field if isinstance(_field, str) else _field._field_name
            if _options is None:
                _built[_name] = {}
            elif isinstance(_options, dict):
                _built[_name] = _options
            else:
                _built[_name] = _options._build()
        return _built

    def _build(self) -> Dict:
        """Serialize the configured options into a ``highlight`` body dict.

        Unset options (``None``) are omitted; numeric and boolean options use explicit
        ``is not None`` checks so legitimate falsy values (0, False) are still emitted.
        """
        _body: Dict = {}
        if self._boundary_chars:
            _body["boundary_chars"] = self._boundary_chars
        if self._boundary_max_scan is not None:
            _body["boundary_max_scan"] = self._boundary_max_scan
        if self._boundary_max_scan_length is not None:
            _body["boundary_max_scan_length"] = self._boundary_max_scan_length
        if self._boundary_scanner:
            _body["boundary_scanner"] = self._enum_or_str(self._boundary_scanner)
        if self._boundary_scanner_locale:
            _body["boundary_scanner_locale"] = self._boundary_scanner_locale
        if self._encoder:
            _body["encoder"] = self._encoder
        if self._fields:
            _body["fields"] = self._build_fields()
        if self._force_source is not None:
            _body["force_source"] = self._force_source
        if self._highlight_query:
            _body["highlight_query"] = self._highlight_query
        if self._matched_fields:
            _body["matched_fields"] = self._matched_fields
        if self._no_match_size is not None:
            _body["no_match_size"] = self._no_match_size
        if self._number_of_fragments is not None:
            _body["number_of_fragments"] = self._number_of_fragments
        if self._boundary_scanner_query:
            _body["boundary_scanner_query"] = self._boundary_scanner_query
        if self._boundary_scanner_type:
            _body["boundary_scanner_type"] = self._boundary_scanner_type
        if self._fragmenter:
            _body["fragmenter"] = self._enum_or_str(self._fragmenter)
        if self._fragment_offset is not None:
            _body["fragment_offset"] = self._fragment_offset
        if self._fragment_size is not None:
            _body["fragment_size"] = self._fragment_size
        if self._order:
            _body["order"] = self._enum_or_str(self._order)
        if self._phrase_limit is not None:
            _body["phrase_limit"] = self._phrase_limit
        if self._pre_tags:
            # A bare string is normalized to a one-element list, as ES expects an array.
            _body["pre_tags"] = self._pre_tags if isinstance(self._pre_tags, list) else [self._pre_tags]
        if self._post_tags:
            _body["post_tags"] = self._post_tags if isinstance(self._post_tags, list) else [self._post_tags]
        if self._require_field_match is not None:
            _body["require_field_match"] = self._require_field_match
        if self._max_analyzed_offset is not None:
            _body["max_analyzed_offset"] = self._max_analyzed_offset
        if self._tags_schema:
            _body["tags_schema"] = self._tags_schema
        if self._type_:
            # Serialized key is "type"; the trailing underscore only avoids the builtin.
            _body["type"] = self._enum_or_str(self._type_)
        return _body




