from typing import Any, Dict, List, Text, Tuple, Optional, Union

from rasa.nlu.tokenizers.tokenizer import Token
from rasa.nlu.components import Component
from rasa.nlu.constants import EXTRACTOR, ENTITIES, TOKENS_NAMES, TEXT
from rasa.nlu.training_data import Message


class EntityExtractor(Component):
    """Base class for entity extraction components.

    Provides shared helpers for:
      - tagging extracted entities with the extractor / processor name,
      - cleaning up entities that are misaligned with token boundaries
        (e.g. when sub-word tokens of one word receive different labels),
      - filtering entities by requested dimension or by the extractor
        that produced them.
    """

    def add_extractor_name(
        self, entities: List[Dict[Text, Any]]
    ) -> List[Dict[Text, Any]]:
        """Set this component's name as the extractor of each entity.

        Args:
            entities: extracted entities (modified in place).

        Returns:
            The same list, with ``EXTRACTOR`` set on every entity.
        """
        for entity in entities:
            entity[EXTRACTOR] = self.name
        return entities

    def add_processor_name(self, entity: Dict[Text, Any]) -> Dict[Text, Any]:
        """Append this component's name to the entity's processor list.

        Args:
            entity: a single entity dict (modified in place).

        Returns:
            The entity, with this component recorded under ``"processors"``.
        """
        if "processors" in entity:
            entity["processors"].append(self.name)
        else:
            entity["processors"] = [self.name]

        return entity

    def clean_up_entities(
        self, message: Message, entities: List[Dict[Text, Any]], keep: bool = True
    ) -> List[Dict[Text, Any]]:
        """Clean up entities that are misaligned with token boundaries.

        Checks whether
          (1) multiple entity labels are assigned to one word,
          (2) an entity label is assigned to just a part of a word, or
          (3) an entity label covers multiple words but one word only partly.
        This can happen if a tokenizer splits words into sub-words and the
        individual sub-words get different entity labels.

        If multiple entity labels are assigned to one word, the label with the
        highest confidence is kept for that word. If only part of a word is
        annotated, that entity label is extended to the whole word.

        Args:
            message: the message the entities were extracted from.
            entities: the extracted entities (modified in place).
            keep:
                If ``True``, keep the entity label with the highest confidence
                when multiple labels are assigned to one word.
                If ``False``, drop all entity labels for that word.

        Returns:
            The updated entities.
        """
        misaligned_entities = self._get_misaligned_entities(
            message.get(TOKENS_NAMES[TEXT]), entities
        )

        entity_indices_to_remove = set()

        for misaligned_entity in misaligned_entities:
            # entity indices involved in this misalignment
            entity_indices = misaligned_entity["entity_indices"]

            if not keep:
                entity_indices_to_remove.update(entity_indices)
                continue

            idx = self._entity_index_to_keep(entities, entity_indices)

            if idx is None:
                entity_indices_to_remove.update(entity_indices)
            else:
                # keep exactly one entity, drop the rest
                entity_indices.remove(idx)
                entity_indices_to_remove.update(entity_indices)

                # stretch the kept entity to cover the complete word(s)
                entities[idx]["start"] = misaligned_entity["start"]
                entities[idx]["end"] = misaligned_entity["end"]
                entities[idx]["value"] = message.text[
                    misaligned_entity["start"]: misaligned_entity["end"]
                ]

        # Remove entries at the end of the list first so the remaining
        # indices stay valid. Delete by index (`del`) rather than
        # `entities.remove(entities[idx])`: `list.remove` deletes the first
        # *equal* dict and would drop the wrong entity if the list contains
        # duplicate entity dicts.
        for idx in sorted(entity_indices_to_remove, reverse=True):
            del entities[idx]

        return entities

    def _get_misaligned_entities(
        self, tokens: List[Token], entities: List[Dict[Text, Any]]
    ) -> List[Dict[Text, Any]]:
        """Identify entities that are not aligned to word boundaries.

        A misaligned entity is one that covers just a part of a word,
        i.e. only some of its sub-word tokens.

        Args:
            tokens: list of tokens of the message text.
            entities: entities found by an entity extractor.

        Returns:
            Misaligned entities, each including the start and end position of
            the final (whole-word) entity span and the indices of the
            entities that are part of this misalignment.
        """
        if not tokens:
            return []

        # group tokens: one token cluster corresponds to one word
        token_clusters = self._token_clusters(tokens)

        # added for safety; only happens when tokens is unset
        # or len(tokens) == 1
        if not token_clusters:
            return []

        misaligned_entities = []
        for entity_idx, entity in enumerate(entities):
            # get all tokens the entity covers/touches
            entity_tokens = self._tokens_of_entity(entity, token_clusters)

            if len(entity_tokens) == 1:
                continue  # the entity covers exactly one word

        # start and end of the complete word span; needed later to update
        # the final entity to cover all tokens the entity touches
            start_position = entity_tokens[0].start
            end_position = entity_tokens[-1].end

            # check if a misaligned entity covering exactly the same word(s)
            # was already found
            _idx = self._misaligned_entity_index(
                misaligned_entities, start_position, end_position
            )

            if _idx is None:
                misaligned_entities.append(
                    {
                        "start": start_position,
                        "end": end_position,
                        "entity_indices": [entity_idx],
                    }
                )
            else:
                misaligned_entities[_idx]["entity_indices"].append(entity_idx)

        return misaligned_entities

    @staticmethod
    def _misaligned_entity_index(
        word_entity_cluster: List[Dict[Text, Union[int, List[int]]]],
        start_position: int,
        end_position: int,
    ) -> Optional[int]:
        """Get the index of the matching misaligned entity, if any.

        Args:
            word_entity_cluster: misaligned entities found so far.
            start_position: start position of the word span.
            end_position: end position of the word span.

        Returns:
            Index of the misaligned entity that matches the provided start and
            end position, or ``None`` if there is no match.
        """
        for idx, cluster in enumerate(word_entity_cluster):
            if cluster["start"] == start_position and cluster["end"] == end_position:
                return idx
        return None

    @staticmethod
    def _tokens_of_entity(
        entity: Dict[Text, Any], token_clusters: List[List[Token]]
    ) -> List[Token]:
        """Get all tokens of the token clusters the entity touches.

        An entity may cover a cluster completely or only partially; in both
        cases all tokens of that cluster are returned.

        Args:
            entity: the entity.
            token_clusters: list of token clusters (one cluster per word).

        Returns:
            The tokens that belong to the provided entity.
        """
        entity_tokens = []
        for token_cluster in token_clusters:
            entity_starts_inside_cluster = (
                token_cluster[0].start <= entity["start"] <= token_cluster[-1].end
            )
            entity_ends_inside_cluster = (
                token_cluster[0].start <= entity["end"] <= token_cluster[-1].end
            )

            if entity_starts_inside_cluster or entity_ends_inside_cluster:
                entity_tokens += token_cluster
        return entity_tokens

    @staticmethod
    def _token_clusters(tokens: List[Token]) -> List[List[Token]]:
        """Build clusters of tokens that belong to one word.

        Args:
            tokens: the tokens of the message text.

        Returns:
            Token clusters; one cluster per word.
        """
        # a token cluster is a list of token indices that belong to one word
        token_index_clusters = []

        # start at 1 so the current token can be compared to the previous one
        for token_idx in range(1, len(tokens)):
            previous_token_idx = token_idx - 1
            # two tokens belong to the same word if there are no other
            # characters between them
            if tokens[token_idx].start == tokens[previous_token_idx].end:
                # a word was split into multiple tokens
                token_cluster_already_exists = (
                    token_index_clusters
                    and token_index_clusters[-1][-1] == previous_token_idx
                )
                if token_cluster_already_exists:
                    token_index_clusters[-1].append(token_idx)
                else:
                    token_index_clusters.append([previous_token_idx, token_idx])
            else:
                if token_idx == 1:
                    # the previous token corresponds to a single word
                    token_index_clusters.append([previous_token_idx])
                token_index_clusters.append([token_idx])

        return [[tokens[idx] for idx in cluster] for cluster in token_index_clusters]

    @staticmethod
    def _entity_index_to_keep(
        entities: List[Dict[Text, Any]], entity_indices: List[int]
    ) -> Optional[int]:
        """Determine the entity index to keep.

        If there is just one candidate, its index is returned. If there are
        multiple candidates, the index of the entity with the highest
        confidence score is returned. If any candidate lacks a confidence
        score, no entity label is kept.

        Args:
            entities: the full list of entities.
            entity_indices: the entity indices to consider.

        Returns:
            The index (into ``entities``) of the entity to keep, or ``None``.
        """
        if len(entity_indices) == 1:
            return entity_indices[0]

        confidences = [
            entities[idx]["confidence"]
            for idx in entity_indices
            if "confidence" in entities[idx]
        ]

        # we don't have confidence values for all entity labels
        if len(confidences) != len(entity_indices):
            return None

        # BUG FIX: `confidences.index(max(...))` is a position within the
        # parallel `confidences` list, not an entity index. Callers use the
        # returned value to index `entities`, so map the position back to
        # the corresponding entry of `entity_indices`.
        return entity_indices[confidences.index(max(confidences))]

    @staticmethod
    def filter_irrelevant_entities(extracted: list, requested_dimensions: set) -> list:
        """Only return the dimensions the user configured.

        Args:
            extracted: the extracted entities.
            requested_dimensions: entity types to keep; if empty/falsy, all
                extracted entities are returned unchanged.

        Returns:
            The filtered entities.
        """
        if requested_dimensions:
            return [
                entity
                for entity in extracted
                if entity["entity"] in requested_dimensions
            ]
        else:
            return extracted

    @staticmethod
    def find_entity(ent, text, tokens) -> Tuple[int, int]:
        """Map an entity's character span to token indices.

        Args:
            ent: entity dict with character ``start``/``end`` positions.
            text: the example text (only used in error messages).
            tokens: the tokens of the text.

        Returns:
            Tuple of (index of the first token of the entity, index one past
            the last token of the entity).

        Raises:
            ValueError: if the entity does not span whole tokens.
        """
        offsets = [token.start for token in tokens]
        ends = [token.end for token in tokens]

        if ent["start"] not in offsets:
            message = (
                "Invalid entity {} in example '{}': "
                "entities must span whole tokens. "
                "Wrong entity start.".format(ent, text)
            )
            raise ValueError(message)

        if ent["end"] not in ends:
            message = (
                "Invalid entity {} in example '{}': "
                "entities must span whole tokens. "
                "Wrong entity end.".format(ent, text)
            )
            raise ValueError(message)

        start = offsets.index(ent["start"])
        end = ends.index(ent["end"]) + 1
        return start, end

    def filter_trainable_entities(
        self, entity_examples: List[Message]
    ) -> List[Message]:
        """Filter out entity annotations that can't be trained on.

        Creates a copy of entity_examples in which entities that have
        `extractor` set to something other than
        self.name (e.g. 'CRFEntityExtractor') are removed.

        Args:
            entity_examples: the training examples to filter.

        Returns:
            New ``Message`` copies containing only trainable entities.
        """
        filtered = []
        for message in entity_examples:
            entities = []
            for ent in message.get(ENTITIES, []):
                extractor = ent.get(EXTRACTOR)
                # keep entities without an extractor or produced by this one
                if not extractor or extractor == self.name:
                    entities.append(ent)
            data = message.data.copy()
            data[ENTITIES] = entities
            filtered.append(
                Message(
                    text=message.text,
                    data=data,
                    output_properties=message.output_properties,
                    time=message.time,
                )
            )

        return filtered