"""
这是一个精致的包装。它包含所有已注册的组件和预配置的模板。
因此，它将导入所有组件。
为避免循环，任何【组件】都不应将此导入此【模块】范围。

Hence, it imports all of the components. To avoid cycles, no component should import this in module scope.
"""

import logging
import typing
from typing import Any, Dict, List, Optional, Text, Type

from rasa.constants import DOCS_URL_COMPONENTS

# Intent classifiers
from rasa.nlu.classifiers.diet_classifier import DIETClassifier
from rasa.nlu.classifiers.keyword_intent_classifier import KeywordIntentClassifier
from rasa.nlu.classifiers.mitie_intent_classifier import MitieIntentClassifier
from rasa.nlu.classifiers.sklearn_intent_classifier import SklearnIntentClassifier
from rasa.nlu.classifiers.embedding_intent_classifier import EmbeddingIntentClassifier

# Entity extractors
from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor
from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor
from rasa.nlu.extractors.entity_synonyms import EntitySynonymMapper
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor

# Text featurizers
from rasa.nlu.featurizers.sparse_featurizer.lexical_syntactic_featurizer import (
    LexicalSyntacticFeaturizer,
)
from rasa.nlu.featurizers.dense_featurizer.convert_featurizer import ConveRTFeaturizer
from rasa.nlu.featurizers.dense_featurizer.mitie_featurizer import MitieFeaturizer
from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer
from rasa.nlu.featurizers.sparse_featurizer.count_vectors_featurizer import (
    CountVectorsFeaturizer,
)
from rasa.nlu.featurizers.dense_featurizer.lm_featurizer import LanguageModelFeaturizer
from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer

# Tokenizers
from rasa.nlu.tokenizers.convert_tokenizer import ConveRTTokenizer
from rasa.nlu.tokenizers.jieba_tokenizer import JiebaTokenizer
from rasa.nlu.tokenizers.mitie_tokenizer import MitieTokenizer
from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.tokenizers.lm_tokenizer import LanguageModelTokenizer

# Word vector sources
from rasa.nlu.utils.mitie_utils import MitieNLP
from rasa.nlu.utils.spacy_utils import SpacyNLP
from rasa.nlu.utils.hugging_face.hf_transformers import HFTransformersNLP

# Selectors
from rasa.nlu.selectors.response_selector import ResponseSelector

# Miscellaneous
from rasa.nlu.model import Metadata
from rasa.utils.common import class_from_module_path, raise_warning

from rasa.utils.tensorflow.constants import (
    INTENT_CLASSIFICATION,
    ENTITY_RECOGNITION,
    NUM_TRANSFORMER_LAYERS,
)

if typing.TYPE_CHECKING:
    # Imported only for static type checking; kept out of runtime scope to
    # avoid import cycles with the components this registry aggregates.
    # Fix: `RasaNLUModelConfig` was previously imported twice on one line.
    from rasa.nlu.components import Component
    from rasa.nlu.config import RasaNLUModelConfig

# Module-level logger named after this module.
logger = logging.getLogger(__name__)


# Classes of all known components. If a new component should be added,
# its class name should be listed here.
component_classes = [
    # utils (word vector / model providers)
    SpacyNLP,
    MitieNLP,
    HFTransformersNLP,

    # tokenizers
    MitieTokenizer,
    SpacyTokenizer,
    WhitespaceTokenizer,
    ConveRTTokenizer,
    JiebaTokenizer,
    LanguageModelTokenizer,

    # extractors
    SpacyEntityExtractor,
    MitieEntityExtractor,
    CRFEntityExtractor,
    DucklingHTTPExtractor,
    EntitySynonymMapper,

    # featurizers
    SpacyFeaturizer,
    MitieFeaturizer,
    RegexFeaturizer,
    LexicalSyntacticFeaturizer,
    CountVectorsFeaturizer,
    ConveRTFeaturizer,
    LanguageModelFeaturizer,

    # classifiers
    SklearnIntentClassifier,
    MitieIntentClassifier,
    KeywordIntentClassifier,
    DIETClassifier,
    EmbeddingIntentClassifier,

    # selectors
    ResponseSelector,
]

# Map each component's registered name to its class to allow name-based lookup.
registered_components = dict(
    (component.name, component) for component in component_classes
)

# DEPRECATED: kept for backwards compatibility; to be removed in a future
# version. Maps old-style component names to their current class names.
old_style_names = {
    "nlp_spacy": "SpacyNLP",
    "nlp_mitie": "MitieNLP",
    "ner_spacy": "SpacyEntityExtractor",
    "ner_mitie": "MitieEntityExtractor",
    "ner_crf": "CRFEntityExtractor",
    "ner_duckling_http": "DucklingHTTPExtractor",
    "ner_synonyms": "EntitySynonymMapper",
    "intent_featurizer_spacy": "SpacyFeaturizer",
    "intent_featurizer_mitie": "MitieFeaturizer",
    # NOTE(review): "NGramFeaturizer" has no entry in `component_classes`, so
    # resolving this old-style name will fail the registry lookup in
    # `get_component_class` — confirm this is intended (removed component).
    "intent_featurizer_ngrams": "NGramFeaturizer",
    "intent_entity_featurizer_regex": "RegexFeaturizer",
    "intent_featurizer_count_vectors": "CountVectorsFeaturizer",
    "tokenizer_mitie": "MitieTokenizer",
    "tokenizer_spacy": "SpacyTokenizer",
    "tokenizer_whitespace": "WhitespaceTokenizer",
    "tokenizer_jieba": "JiebaTokenizer",
    "intent_classifier_sklearn": "SklearnIntentClassifier",
    "intent_classifier_mitie": "MitieIntentClassifier",
    "intent_classifier_keyword": "KeywordIntentClassifier",
    "intent_classifier_tensorflow_embedding": "EmbeddingIntentClassifier",
}

# To simplify usage, there are a couple of model templates that already add
# the necessary components in the right order. They also implement the
# preexisting "backends".
registered_pipeline_templates = {
    "pretrained_embeddings_spacy": [
        {"name": "SpacyNLP"},
        {"name": "SpacyTokenizer"},
        {"name": "SpacyFeaturizer"},
        {"name": "RegexFeaturizer"},
        {"name": "CRFEntityExtractor"},
        {"name": "EntitySynonymMapper"},
        {"name": "SklearnIntentClassifier"},
    ],
    "keyword": [{"name": "KeywordIntentClassifier"}],
    "supervised_embeddings": [
        {"name": "WhitespaceTokenizer"},
        {"name": "RegexFeaturizer"},
        {"name": "CRFEntityExtractor"},
        {"name": "EntitySynonymMapper"},
        {"name": "CountVectorsFeaturizer"},
        # A second count-vectors featurizer operating on character n-grams
        # (1-4 chars, word-boundary aware) in addition to the word-level one.
        {
            "name": "CountVectorsFeaturizer",
            "analyzer": "char_wb",
            "min_ngram": 1,
            "max_ngram": 4,
        },
        {"name": "EmbeddingIntentClassifier"},
    ],
    "pretrained_embeddings_convert": [
        {"name": "ConveRTTokenizer"},
        {"name": "ConveRTFeaturizer"},
        {"name": "EmbeddingIntentClassifier"},
    ],
}


def pipeline_template(s: Text) -> Optional[List[Dict[Text, Any]]]:
    """Return a deep copy of the pipeline template registered under `s`.

    Copying prevents callers from mutating the shared template
    configuration. Returns None when no template with that name exists.
    """
    from copy import deepcopy

    template = registered_pipeline_templates.get(s)
    return deepcopy(template)


def get_component_class(component_name: Text) -> Type["Component"]:
    """Resolve a component name to a registered component class.

    Resolution order: the manually registered components in this module,
    then deprecated old-style names (with a deprecation warning), and
    finally an import by full module path for custom components.

    Args:
        component_name: a registered class name, an old-style name, or a
            dotted module path to a component class.

    Raises:
        Exception: if the module exists but does not contain the class.
        ModuleNotFoundError: if the module path is invalid, or a bare class
            name is neither registered nor importable.
    """
    # Fast path: the name is registered directly.
    if component_name in registered_components:
        return registered_components[component_name]

    if component_name in old_style_names:
        # DEPRECATED: kept for backwards compatibility, to be removed in a
        # future version.
        raise_warning(
            f"Your nlu config file "
            f"contains old style component name `{component_name}`, "
            f"you should change it to its new class name: "
            f"`{old_style_names[component_name]}`.",
            FutureWarning,
            docs=DOCS_URL_COMPONENTS,
        )
        return registered_components[old_style_names[component_name]]

    # Not listed and not an old-style name: try to import it as a
    # dotted path to a custom component class.
    try:
        return class_from_module_path(component_name)
    except AttributeError as e:
        # The module was importable but does not define the requested class.
        module_name, _, class_name = component_name.rpartition(".")
        raise Exception(
            f"Failed to find class '{class_name}' in module '{module_name}'.\n"
        ) from e
    except ImportError as e:
        # Either the module path is invalid, or a bare class name was given
        # that is not part of the registry.
        if "." in component_name:
            module_name, _, _ = component_name.rpartition(".")
            exception_message = f"Failed to find module '{module_name}'. \n{e}"
        else:
            exception_message = (
                f"Cannot find class '{component_name}' from global namespace. "
                f"Please check that there is no typo in the class "
                f"name and that you have imported the class into the global "
                f"namespace."
            )
        # Chain the original ImportError so the root cause is not lost.
        raise ModuleNotFoundError(exception_message) from e


def load_component_by_meta(
    component_meta: Dict[Text, Any],
    model_dir: Text,
    metadata: Metadata,
    cached_component: Optional["Component"],
    **kwargs: Any,
) -> Optional["Component"]:
    """Resolve a component from its metadata and restore it via `load`.

    The component is initialized from a previously persisted model.
    """
    # An explicit "class" entry takes precedence over the component "name".
    lookup_key = component_meta.get("class", component_meta["name"])
    resolved_class = get_component_class(lookup_key)
    return resolved_class.load(
        component_meta, model_dir, metadata, cached_component, **kwargs
    )


def create_component_by_config(
    component_config: Dict[Text, Any], config: "RasaNLUModelConfig"
) -> Optional["Component"]:
    """Resolve a component from its configuration and build it via `create`.

    A fresh (untrained) component instance is constructed from the given
    component configuration and the overall NLU model config.
    """
    # An explicit "class" entry takes precedence over the component "name".
    lookup_key = component_config.get("class", component_config["name"])
    resolved_class = get_component_class(lookup_key)
    return resolved_class.create(component_config, config)
