import logging
import os
import typing
import warnings
from typing import Any, Dict, List, Optional, Text, Tuple, Type

import numpy as np

import rasa.utils.io as io_utils
from rasa.constants import DOCS_URL_TRAINING_DATA_NLU
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.nlu.featurizers.featurizer import DenseFeaturizer
from rasa.nlu.components import Component
from rasa.nlu.classifiers.classifier import IntentClassifier
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.constants import DENSE_FEATURE_NAMES, TEXT
from rasa.nlu.featurizers.featurizer import sequence_to_sentence_features
from rasa.nlu.model import Metadata
from rasa.nlu.training_data import Message, TrainingData
import rasa.utils.common as common_utils

logger = logging.getLogger(__name__)

if typing.TYPE_CHECKING:
    import sklearn


class SklearnIntentClassifier(IntentClassifier):
    """Intent classifier using the sklearn framework (grid-searched SVC)."""

    @classmethod
    def required_components(cls) -> List[Type[Component]]:
        """Dense features must be provided by a preceding featurizer."""
        return [DenseFeaturizer]

    defaults = {
        # C parameter of the SVM - cross validation will select the best value.
        "C": [1, 2, 5, 10, 20, 100],
        # gamma parameter of the SVM.
        "gamma": [0.1],
        # The kernels to use for the SVM training - cross validation will
        # decide which one of them performs best.
        "kernels": ["linear"],
        # We try to find a good number of cross folds to use during intent
        # training; this specifies the maximum number of folds.
        "max_cross_validation_folds": 5,
        # Scoring function used for evaluating the hyper parameters.
        # This can be a name or a function (see the GridSearchCV
        # documentation for more information).
        "scoring_function": "f1_weighted",
    }

    def __init__(
        self,
        component_config: Optional[Dict[Text, Any]] = None,
        clf: "sklearn.model_selection.GridSearchCV" = None,
        le: Optional["sklearn.preprocessing.LabelEncoder"] = None,
    ) -> None:
        """Construct a new intent classifier using the sklearn framework.

        Args:
            component_config: configuration overriding the class ``defaults``.
            clf: an already fitted classifier (used when loading a model).
            le: an already fitted label encoder (used when loading a model).
        """
        from sklearn.preprocessing import LabelEncoder

        super().__init__(component_config)

        if le is not None:
            self.le = le
        else:
            self.le = LabelEncoder()
        self.clf = clf

    @classmethod
    def required_packages(cls) -> List[Text]:
        """Python packages that need to be installed to use this component."""
        return ["sklearn"]

    def transform_labels_str2num(self, labels: List[Text]) -> np.ndarray:
        """Transform a list of string labels into a numeric representation.

        Note: this fits the internal label encoder as a side effect, so it
        is only meant to be called during training.

        :param labels: List of labels to convert to numeric representation
        """

        return self.le.fit_transform(labels)

    def transform_labels_num2str(self, y: np.ndarray) -> np.ndarray:
        """Transform numeric label ids back into their string representation.

        :param y: array of numeric label ids to convert back to strings
        """

        return self.le.inverse_transform(y)

    def train(
        self,
        training_data: TrainingData,
        config: Optional[RasaNLUModelConfig] = None,
        **kwargs: Any,
    ) -> None:
        """Train the intent classifier on a data set.

        Training is skipped (with a warning) when fewer than two distinct
        intents are present, since a classifier cannot be fit in that case.
        """

        num_threads = kwargs.get("num_threads", 1)

        labels = [e.get("intent") for e in training_data.intent_examples]

        if len(set(labels)) < 2:
            common_utils.raise_warning(
                "Can not train an intent classifier as there are not "
                "enough intents. Need at least 2 different intents. "
                "Skipping training of intent classifier.",
                docs=DOCS_URL_TRAINING_DATA_NLU,
            )
            return

        y = self.transform_labels_str2num(labels)
        X = np.stack(
            [
                sequence_to_sentence_features(example.get(DENSE_FEATURE_NAMES[TEXT]))
                for example in training_data.intent_examples
            ]
        )

        # collapse any extra feature dimensions into one flat vector
        # per training example
        X = np.reshape(X, (len(X), -1))

        self.clf = self._create_classifier(num_threads, y)

        # sklearn raises lots of "UndefinedMetricWarning: F-score is
        # undefined" when there are few examples per intent; silence
        # them while the grid search runs
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.clf.fit(X, y)

    def _num_cv_splits(self, y) -> int:
        """Number of CV folds: aim for at least 5 examples per fold,
        clamped between 2 and ``max_cross_validation_folds``."""
        folds = self.component_config["max_cross_validation_folds"]
        return max(2, min(folds, np.min(np.bincount(y)) // 5))

    def _create_classifier(
        self, num_threads: int, y
    ) -> "sklearn.model_selection.GridSearchCV":
        """Build the (not yet fitted) grid-search wrapped SVC."""
        from sklearn.model_selection import GridSearchCV
        from sklearn.svm import SVC

        C = self.component_config["C"]
        kernels = self.component_config["kernels"]
        gamma = self.component_config["gamma"]

        # dirty str fix because sklearn expects str not instances of basestr...
        tuned_parameters = [
            {"C": C, "gamma": gamma, "kernel": [str(k) for k in kernels]}
        ]

        # aim for 5 examples in each fold
        cv_splits = self._num_cv_splits(y)

        return GridSearchCV(
            SVC(C=1, probability=True, class_weight="balanced"),
            param_grid=tuned_parameters,
            n_jobs=num_threads,
            cv=cv_splits,
            scoring=self.component_config["scoring_function"],
            verbose=1,
            # NOTE(review): `iid` is deprecated in sklearn 0.22 and removed
            # in 0.24; kept for compatibility with the sklearn version this
            # code was written against — confirm before upgrading sklearn.
            iid=False,
        )

    def process(self, message: Message, **kwargs: Any) -> None:
        """Attach the most likely intent and an intent ranking to the message."""

        if not self.clf:
            # component is either not trained or didn't receive
            # enough training data
            intent = None
            intent_ranking = []
        else:
            X = sequence_to_sentence_features(
                message.get(DENSE_FEATURE_NAMES[TEXT])
            ).reshape(1, -1)

            # model prediction for the single example
            intent_ids, probabilities = self.predict(X)

            # map numeric label ids back to intent names
            intents = self.transform_labels_num2str(np.ravel(intent_ids))

            # `predict` returns matrices as it is supposed to work for
            # multiple examples as well, hence we need to flatten
            probabilities = probabilities.flatten()

            if intents.size > 0 and probabilities.size > 0:
                ranking = list(zip(list(intents), list(probabilities)))[
                    :LABEL_RANKING_LENGTH
                ]

                # first entry is the most probable intent
                intent = {"name": intents[0], "confidence": probabilities[0]}

                intent_ranking = [
                    {"name": intent_name, "confidence": score}
                    for intent_name, score in ranking
                ]
            else:
                intent = {"name": None, "confidence": 0.0}
                intent_ranking = []

        # store the classification result on the message
        message.set("intent", intent, add_to_output=True)
        message.set("intent_ranking", intent_ranking, add_to_output=True)

    def predict_prob(self, X: np.ndarray) -> np.ndarray:
        """Given a bow vector of an input text, predict the intent label.

        Returns probabilities for all labels.

        :param X: bow of input text
        :return: vector of probabilities containing one entry for each label
        """

        return self.clf.predict_proba(X)

    def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Given a bow vector of an input text, predict the most likely labels.

        :param X: bow of input text
        :return: tuple of first, the label ids sorted by descending
                 probability and second, the correspondingly sorted
                 probabilities.
        """

        pred_result = self.predict_prob(X)

        # sort the probabilities retrieving the indices of
        # the elements in sorted order
        sorted_indices = np.fliplr(np.argsort(pred_result, axis=1))

        # FIX: reorder each row by its own sorted indices. The previous
        # `pred_result[:, sorted_indices]` fancy indexing produced an
        # (n, n, k) array, which is only correct when n == 1.
        return sorted_indices, np.take_along_axis(pred_result, sorted_indices, axis=1)

    def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
        """Persist this model into the passed directory.

        Returns the metadata needed by ``load`` to restore the component;
        the files are only written when a trained classifier exists.
        """

        classifier_file_name = file_name + "_classifier.pkl"
        encoder_file_name = file_name + "_encoder.pkl"
        if self.clf and self.le:
            io_utils.json_pickle(
                os.path.join(model_dir, encoder_file_name), self.le.classes_
            )
            io_utils.json_pickle(
                os.path.join(model_dir, classifier_file_name), self.clf.best_estimator_
            )
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}

    @classmethod
    def load(
        cls,
        meta: Dict[Text, Any],
        model_dir: Optional[Text] = None,
        model_metadata: Optional[Metadata] = None,
        cached_component: Optional["SklearnIntentClassifier"] = None,
        **kwargs: Any,
    ) -> "SklearnIntentClassifier":
        """Load this component from the files written by ``persist``.

        Falls back to an untrained instance when no persisted classifier
        is found on disk.
        """

        from sklearn.preprocessing import LabelEncoder

        classifier_file = os.path.join(model_dir, meta.get("classifier"))
        encoder_file = os.path.join(model_dir, meta.get("encoder"))

        if os.path.exists(classifier_file):
            classifier = io_utils.json_unpickle(classifier_file)
            classes = io_utils.json_unpickle(encoder_file)
            # restore the encoder by assigning its fitted classes directly
            encoder = LabelEncoder()
            encoder.classes_ = classes
            return cls(meta, classifier, encoder)
        else:
            return cls(meta)
