from tabnanny import check
from typing import List
from ner.ner_model import NerModel
from ner.predictor import Predictor
from ner.trainer import Trainer
from word_vec_encoders.word_vec_encoder_set import WordVecEncoderSet
from ner.dataset import Dataset
import torch

class NerCase:
    """A configurable NER case: builds/loads a model, trains it, and runs prediction."""

    def __init__(self,
                    pretrained_model_name:str = None,   
                    use_dynamic_fusion:bool = True,
                    num_dynamic_fusion_layers:int = 1,
                    word_vector_encoder_set:WordVecEncoderSet = None,
                    use_bilstm:bool = True, num_bilstm_layers:int = 1, bilstm_dropout:float = 0,
                    use_crf:bool = True,
                    fine_tuning_only:bool = True
                    ):
        """
        Initialize an NER model case.

        Args:
            pretrained_model_name (str): Name of the pretrained model (a Huggingface model name).
            use_dynamic_fusion (bool): Whether to use the dynamic fusion mechanism.
            num_dynamic_fusion_layers (int): Number of fully-connected layers in the dynamic
                fusion mechanism. Only effective when use_dynamic_fusion is True.
            word_vector_encoder_set (WordVecEncoderSet): A set of static word-vector encoders;
                may contain several kinds of static word vectors.
            use_bilstm (bool): Whether to use a BiLSTM layer.
            num_bilstm_layers (int): Number of BiLSTM layers, default 1. Only effective when
                use_bilstm is True.
            bilstm_dropout (float): Dropout between BiLSTM layers, default 0. Only effective
                when num_bilstm_layers > 1.
            use_crf (bool): Use a CRF classification layer if True, otherwise Softmax.
                Default True.
            fine_tuning_only (bool): Whether to fine-tune only. Default True.
        """
        self.pretrained_model_name = pretrained_model_name
        self.use_dynamic_fusion = use_dynamic_fusion
        self.num_fusion_weight_fc_layers = num_dynamic_fusion_layers
        self.fine_tuning_only = fine_tuning_only
        self.word_vector_encoder_set = word_vector_encoder_set
        self.use_bilstm = use_bilstm
        self.num_bilstm_layers = num_bilstm_layers
        self.bilstm_dropout = bilstm_dropout
        self.use_crf = use_crf
        # Set by load_from(); when not None, train()/predict_batch() resume from this file.
        self.resume_from = None
        # Lazily created collaborators (built on first train() / predict() call).
        self.trainer = None
        self.predictor = None
        self.model = None

    @classmethod
    def load_from(cls, model_file_name:str):
        """
        Restore an NER case from a saved model file.

        The actual model/dataset loading is deferred until train() or
        predict_batch() is first called.

        Args:
            model_file_name (str): File name of the saved model.

        Returns:
            NerCase: The loaded NER case.
        """
        case = cls()
        case.resume_from = model_file_name
        return case

    def train(self, num_epochs:int,
                    dataset_name:str,
                    train_corpus_path:str,
                    test_corpus_path:str,
                    corpus_type:str,
                    test_every_n_epoch:int = 1,
                    save_every_n_epoch:int = 1,
                    batch_size:int = 32,
                    learning_rate:float = 2e-5,
                    data_loader_num_workers:int = 1,
                    ) -> None:
        """
        Train this case.

        On the first call this builds (or, if resume_from is set, loads) the
        datasets, the model and the trainer; subsequent calls reuse them and
        simply continue training.

        Args:
            num_epochs (int): Total number of training epochs.
            dataset_name (str): Name passed through to the trainer (e.g. for logging/saving).
            train_corpus_path (str): Path of the training corpus. Every file under it is read.
            test_corpus_path (str): Path of the test corpus. Every file under it is read.
            corpus_type (str): Corpus type; only 'ccks' and 'peoplesdaily' are supported.
            test_every_n_epoch (int, optional): Evaluate on the test set every n epochs. Default 1.
            save_every_n_epoch (int, optional): Save a checkpoint every n epochs. Default 1.
            batch_size (int, optional): Batch size (used for both train and test). Default 32.
            learning_rate (float, optional): Learning rate. Default 2e-5.
            data_loader_num_workers (int, optional): Number of DataLoader workers. Default 1.
        """
        if self.trainer is None:
            if self.resume_from is not None:
                # Load the datasets.
                self.train_dataset, self.test_dataset = Dataset.load_train_and_test_dataset_from(self.resume_from)
                # Load the model.
                self.model = NerModel.load_from(self.resume_from)
                # Load the trainer (same batch size for train and test).
                self.trainer = Trainer.load_from(self.resume_from, self.model, batch_size, batch_size, learning_rate)
            else:
                # Build fresh datasets.
                self.train_dataset = Dataset(self.pretrained_model_name, 
                                                corpus_path=train_corpus_path, 
                                                corpus_type=corpus_type)
                self.test_dataset = Dataset(self.pretrained_model_name, 
                                                corpus_path=test_corpus_path, 
                                                corpus_type=corpus_type, 
                                                known_labels = self.train_dataset.known_labels) # reuse the training set's label order
                # Build a fresh model.
                self.model = NerModel(known_labels = self.train_dataset.known_labels,
                                    pretrained_model_name = self.pretrained_model_name,
                                    fine_tuning_only = self.fine_tuning_only,
                                    use_dynamic_fusion = self.use_dynamic_fusion,
                                    num_fusion_weight_fc_layers = self.num_fusion_weight_fc_layers,
                                    word_vec_encoder_set = self.word_vector_encoder_set,
                                    use_bilstm = self.use_bilstm,
                                    num_bilstm_layers = self.num_bilstm_layers,
                                    bilstm_dropout = self.bilstm_dropout,
                                    use_crf = self.use_crf)

                # Build a fresh trainer.
                self.trainer = Trainer(known_labels = self.train_dataset.known_labels,
                                    model = self.model, 
                                    train_batch_size = batch_size, 
                                    test_batch_size = batch_size, 
                                    learning_rate = learning_rate)

        print(self.model)

        # BUGFIX: previously nested inside `if self.trainer is None`, so any call
        # after the first silently did nothing. Now every call actually trains.
        self.trainer.train(self.model, 
                        self.train_dataset, 
                        self.test_dataset,
                        self.train_dataset.get_data_loader(self.model, batch_size, data_loader_num_workers), 
                        self.test_dataset.get_data_loader(self.model, batch_size, data_loader_num_workers), 
                        num_epochs, 
                        test_every_n_epoch, 
                        save_every_n_epoch,
                        dataset_name,
                        )


    def predict(self, text:str, return_style:str = 'isolated'):
        """
        Predict the sequence-labeling result of a single input string.

        Args:
            text (str): The input string.
            return_style (str): Style of the returned data; see predict_batch().
                Default 'isolated'.

        Returns:
            list: The sequence-labeling result for the input string, in the
                format selected by return_style (delegates to predict_batch()).
        """
        return self.predict_batch([text], return_style)

    def predict_batch(self, batch_text:List[str], return_style:str = 'isolated'):
        """
        Predict the sequence-labeling results of a batch of input strings.

        On the first call this builds the predictor, loading the model from
        resume_from if no model is present yet.

        Args:
            batch_text (List[str]): The input strings.
            return_style (str): Style of the returned data. Presumably one of
                'int' (label indices), 'label' (label strings), 'parallel'
                (characters aligned with labels) or 'isolated' — exact semantics
                are defined by Predictor.predict; TODO confirm against Predictor.
                Default 'isolated'.

        Returns:
            list: The concatenated sequence-labeling results of all inputs.

        Raises:
            RuntimeError: If no model is available and resume_from is not set.
        """
        if self.predictor is None:
            if self.model is None:
                if self.resume_from is not None:
                    # Load the model.
                    self.model = NerModel.load_from(self.resume_from)
                else:
                    raise RuntimeError('必须载入一个模型')
            self.predictor = Predictor(self.model, self.model.known_labels)
        results = []
        for text in batch_text:
            results.extend(self.predictor.predict(text, return_style))
        return results