from __future__ import annotations
import torch
from transformers import AutoTokenizer, AutoModel
import torchcrf
from transformers import logging
from typing import TYPE_CHECKING, List

# from zmq import device
from utils.handy_functions import pad_inner_lists_of_2d_list
# from word_vec_encoders.char_word_vec_encoder import CharWordVecEncoder
# from word_vec_encoders.pinyin_word_vec_encoder import PinyinWordVecEncoder
from word_vec_encoders.word_vec_encoder_set import WordVecEncoderSet

if TYPE_CHECKING:
    from ner.dataset import Dataset

# Module-level default compute device: first CUDA GPU if available, else CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class NerModel(torch.nn.Module):
    """NER sequence labeller: pretrained encoder (optional) + dynamic layer fusion
    (optional) + extra word-vector features (optional) + BiLSTM (optional) + CRF (optional)."""

    def __init__(self,
                 known_labels: List[str],
                 pretrained_model_name: str,
                 fine_tuning_only: bool = True,
                 use_dynamic_fusion: bool = False,
                 num_fusion_weight_fc_layers: int = 1,
                 word_vec_encoder_set: WordVecEncoderSet = None,
                 use_bilstm: bool = True, num_bilstm_layers: int = 1, bilstm_dropout: float = 0,
                 use_crf: bool = True,
                 ) -> None:
        """
        Build an NER model.

        Args:
            known_labels: label vocabulary; positions in this list are the class ids.
            pretrained_model_name: HuggingFace model name; ``None`` disables the pretrained encoder.
            fine_tuning_only (bool, optional): freeze the pretrained weights and train only the
                task head. Default True. Only meaningful when a pretrained model is used.
            use_dynamic_fusion (bool): fuse all pretrained hidden layers with learned weights.
            num_fusion_weight_fc_layers (int): depth of the fully-connected net computing fusion weights.
            word_vec_encoder_set (WordVecEncoderSet, optional): extra feature encoders, default None.
            use_bilstm (bool, optional): add a BiLSTM over the features. Default True.
            num_bilstm_layers (int, optional): BiLSTM depth, default 1. Used when use_bilstm is True.
            bilstm_dropout (float, optional): dropout rate between BiLSTM layers, default 0.
                Used when num_bilstm_layers > 1.
            use_crf (bool, optional): add a CRF decoding layer. Default True.

        Raises:
            ValueError: if the fusion weight net is too deep, or if neither a pretrained
                model nor any word-vector feature is configured.
        """
        super().__init__()
        logging.set_verbosity_error()  # silence the flood of transformers warnings

        # Labels and basic configuration.
        self.known_labels = known_labels
        self.pretrained_model_name = pretrained_model_name
        self.fine_tuning_only = fine_tuning_only
        self.use_dynamic_fusion = use_dynamic_fusion
        self.num_fusion_weight_layers = num_fusion_weight_fc_layers

        if self.pretrained_model_name is not None:
            self.pretrained_model = AutoModel.from_pretrained(self.pretrained_model_name)
            self.pretrained_hidden_dims = self._test_pretrained_hidden_state_sizes()
            self.pretrained_out_dim = self.pretrained_hidden_dims[-1]
            num_params = sum(p.numel() for p in self.pretrained_model.parameters())
            print(f'选用预训练模型 {self.pretrained_model_name}, 参数量：{num_params / 1e4} 万')
            if self.fine_tuning_only:
                print('仅微调')
                # Freeze the pretrained encoder; only the task head will be trained.
                for parameter in self.pretrained_model.parameters():
                    parameter.requires_grad_(False)
            if self.use_dynamic_fusion:
                print('使用动态融合')
                # Dynamic fusion: a small FC net maps the concatenation of all hidden
                # layers to one weight per layer (softmax applied in the forward pass).
                self.fusion_first_layer_dim = sum(self.pretrained_hidden_dims)  # input dim
                self.fusion_last_layer_dim = len(self.pretrained_hidden_dims)   # output dim: one weight per layer
                prev_layer_dim = self.fusion_first_layer_dim
                self.fusion_weight_fc_hidden_layers = torch.nn.Sequential()  # hidden layers of the weight net
                for i in range(self.num_fusion_weight_layers - 1):
                    current_layer_dim = prev_layer_dim // 4  # shrink width 4x per hidden layer
                    if current_layer_dim <= self.fusion_last_layer_dim:
                        raise ValueError(f'动态融合权重网络的层数（{self.num_fusion_weight_layers}）设置得过多')
                    self.fusion_weight_fc_hidden_layers.add_module(
                        f'fc{i}', torch.nn.Linear(prev_layer_dim, current_layer_dim))
                    self.fusion_weight_fc_hidden_layers.add_module(f'relu{i}', torch.nn.ReLU())
                    prev_layer_dim = current_layer_dim
                # Final layer of the weight net.
                self.fusion_weight_fc_last_layer = torch.nn.Linear(prev_layer_dim, self.fusion_last_layer_dim)
            else:
                print('不使用动态融合')
        else:
            self.pretrained_model = None
            self.pretrained_hidden_dims = None
            self.pretrained_out_dim = 0
            self.bert_hiddens_to_weights = None
            print('不使用预训练模型')

        ### Extra word-vector features ###
        if word_vec_encoder_set is None:
            word_vec_encoder_set = WordVecEncoderSet()
        self.word_vec_encoder_set = word_vec_encoder_set
        self.word_vec_dim_sum = 0
        for encoder in word_vec_encoder_set:
            print(f'使用特征 {encoder.name}')
            self.word_vec_dim_sum += encoder.embedding_dim

        ### BiLSTM ###
        bilstm_input_dim = self.pretrained_out_dim + self.word_vec_dim_sum
        if bilstm_input_dim == 0:
            raise ValueError('预训练模型和词向量都不存在')
        self.use_bilstm = use_bilstm
        self.num_classes = len(self.known_labels)
        self.num_bilstm_layers = num_bilstm_layers
        self.bilstm_dropout = bilstm_dropout
        self.use_crf = use_crf
        if use_bilstm:
            print(f'使用BiLSTM, 层数 {num_bilstm_layers}')
            num_directions = 2
            self.bilstm = torch.nn.LSTM(
                input_size=bilstm_input_dim,
                # Half-width per direction so the BiLSTM output dim equals its input dim.
                hidden_size=bilstm_input_dim // num_directions,
                batch_first=True,
                num_layers=num_bilstm_layers,
                dropout=bilstm_dropout,
                bidirectional=True
            )
        else:
            print('不使用BiLSTM')
            self.bilstm = None
        # Classification head: per-token emission scores over the label set.
        self.linear = torch.nn.Linear(bilstm_input_dim, self.num_classes)

        ### CRF ###
        if use_crf:
            print('使用CRF')
            self.crf = torchcrf.CRF(self.num_classes, True)
        else:
            print('不使用CRF')
            self.crf = None

    @staticmethod
    def load_from(model_file_name: str):
        """Load a whole model (stored under key 'model') from a checkpoint and move it to the available device."""
        print(f'从 {model_file_name} 载入模型')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print('使用:', device)  # fixed stray 'L' typo in the original message
        # NOTE(review): torch.load unpickles arbitrary objects — only load trusted checkpoints.
        model = torch.load(model_file_name, map_location=device)['model']
        model.to(device)
        return model

    def forward_before_classification(self,
                                      word_vec_input_tensors: List[torch.Tensor],
                                      bert_token_ids: torch.LongTensor,
                                      bert_token_types: torch.LongTensor,
                                      masks: torch.LongTensor,
                                      device: str) -> torch.Tensor:
        '''Everything before the classification layer: encoders -> (BiLSTM) -> linear emission scores.'''

        batch_size, batch_len = masks.size()

        # Pretrained encoder.
        if self.pretrained_model is not None:
            # BUG FIX: gradients were previously disabled unconditionally, making
            # fine_tuning_only=False ineffective. Skip gradients only when frozen.
            if self.fine_tuning_only:
                with torch.no_grad():
                    out = self.pretrained_model(
                        input_ids=bert_token_ids,
                        attention_mask=masks,
                        token_type_ids=bert_token_types,
                        output_hidden_states=True
                    )
            else:
                out = self.pretrained_model(
                    input_ids=bert_token_ids,
                    attention_mask=masks,
                    token_type_ids=bert_token_types,
                    output_hidden_states=True
                )
            if self.use_dynamic_fusion:
                # Dynamic fusion: per-token softmax weights over all hidden layers.
                # hidden_states[0] is the input embedding layer and is excluded.
                alpha_input = torch.cat(out.hidden_states[1:], dim=2)
                alpha_hidden_out = self.fusion_weight_fc_hidden_layers(alpha_input)
                alpha_out = self.fusion_weight_fc_last_layer(alpha_hidden_out).softmax(dim=2)
                alpha_out = alpha_out[:, :, None, :]  # broadcast over the hidden dimension
                multilayer_embeddings = torch.stack(out.hidden_states[1:], dim=3)
                pretrained_out = torch.sum(alpha_out * multilayer_embeddings, dim=3)
            else:
                # No fusion: use the last hidden layer only.
                pretrained_out: torch.Tensor = out.last_hidden_state
        else:
            # No pretrained encoder: zero-width placeholder so the concat below works.
            pretrained_out = torch.zeros((batch_size, batch_len, 0)).to(device)

        if self.word_vec_encoder_set.count() > 0:
            # Append extra word-vector features along the feature dimension.
            word_vector_out = self.word_vec_encoder_set(word_vec_input_tensors, batch_size, device)
            out = torch.cat((pretrained_out, word_vector_out), dim=2)
        else:
            out = pretrained_out

        # BiLSTM
        if self.bilstm is not None:
            out, _ = self.bilstm(out)

        # Linear projection to per-class emission scores.
        out = self.linear(out)

        return out

    def classify(self, out: torch.Tensor, attention_mask: torch.LongTensor):
        """Turn emission scores into predictions: CRF decode when a CRF exists, else per-token softmax."""
        if self.crf is not None:
            attention_mask = attention_mask == 1  # CRF expects a boolean mask
            # CRF Viterbi decoding returns ragged Python lists; pad with the control label id.
            out = self.crf.decode(emissions=out, mask=attention_mask)
            out = pad_inner_lists_of_2d_list(out, self.known_labels.index('控制标签'))
            out = torch.LongTensor(out)
        else:
            # Softmax classification.
            out = out.softmax(dim=2)
        return out

    def forward(self,
                input_strings: List[str],
                word_vec_input_tensors: List[torch.Tensor],
                bert_token_ids: torch.LongTensor,
                bert_token_types: torch.LongTensor,
                bert_masks: torch.LongTensor,
                gold_label_ids: torch.LongTensor,
                device: str) -> torch.Tensor:
        """Full inference pass. ``input_strings`` and ``gold_label_ids`` are accepted for
        interface compatibility with the loss methods but are not used here."""
        # Everything up to the emission scores.
        out = self.forward_before_classification(
            word_vec_input_tensors,
            bert_token_ids,
            bert_token_types,
            bert_masks,
            device
        )
        # Classification.
        return self.classify(out, bert_masks)

    def neg_log_likelihood_loss(self,
                                input_strings: List[str],
                                word_vec_input_tensors: List[torch.Tensor],
                                bert_token_ids: torch.LongTensor,
                                bert_token_types: torch.LongTensor,
                                bert_masks: torch.LongTensor,
                                gold_label_ids: torch.LongTensor,
                                device: str):
        """Mean negative log-likelihood under the CRF. Requires use_crf=True (self.crf is not None)."""
        bool_mask = (bert_masks == 1)  # CRF expects a boolean mask

        # Emission scores (everything before the CRF layer).
        out = self.forward_before_classification(
            word_vec_input_tensors,
            bert_token_ids,
            bert_token_types,
            bert_masks,
            device
        )
        # CRF layer: its forward returns the log-likelihood, so negate for a loss.
        loss = - self.crf(emissions=out, tags=gold_label_ids, mask=bool_mask, reduction='mean')

        return loss

    def neg_log_likelihood_loss_2(self,
                                  seq_scores: torch.Tensor,
                                  bool_masks: torch.LongTensor,
                                  gold_label_ids: torch.LongTensor,
                                  ):
        """CRF loss from precomputed emission scores (skips the encoder forward pass)."""
        loss = - self.crf(emissions=seq_scores, tags=gold_label_ids, mask=bool_masks, reduction='mean')

        return loss

    def _test_pretrained_hidden_state_sizes(self) -> List[int]:
        '''Probe run: return the hidden width of every pretrained layer (input embedding excluded).'''
        tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model_name)
        inputs = tokenizer("这是个测试句子", return_tensors='pt')
        outputs = self.pretrained_model(**inputs, output_hidden_states=True)
        return [h.size()[2] for h in outputs.hidden_states[1:]]  # hidden_states[0] is the input embedding

    def get_description(self) -> str:
        """
        Build a string describing the current model configuration. The string contains
        no illegal characters and can be used as a file name.

        Returns:
            str: configuration description.
        """
        text = f'{self.pretrained_model_name}'
        if self.use_dynamic_fusion:
            text += f'-dynamicfusion{self.num_fusion_weight_layers}x'
        for wv in self.word_vec_encoder_set:
            text += f'-{wv.name}'
        if self.use_bilstm:
            text += f'-bilstm{self.num_bilstm_layers}x'
        if self.crf is not None:
            text += '-crf'
        # Replace characters that are illegal or awkward in file names.
        illegal_chars = '\\/:*?<>|@#$&'
        for c in illegal_chars:
            text = text.replace(c, '-')
        return text
