import torch
import torch.nn as nn
from TorchCRF import CRF

from P03_NER.LSTM_CRF.utils.data_loader import *


class NERLSTM_CRF(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, dropout, word2id, tag2id):
        '''
        BiLSTM-CRF model for Named Entity Recognition.

        :param embedding_dim: embedding dimension (e.g. 300)
        :param hidden_dim: total output dimension of the BiLSTM. The LSTM is
            bidirectional, so each direction uses hidden_dim // 2 hidden units
            and the concatenated output has exactly hidden_dim features.
        :param dropout: dropout probability applied to the BiLSTM output
        :param word2id: word-to-index dictionary (its length is the vocab size)
        :param tag2id: tag-to-index dictionary (its length is the tag-set size)
        '''
        super(NERLSTM_CRF, self).__init__()
        # Model name, used by external logging / checkpoint code.
        self.name = "BiLSTM_CRF"
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        # Vocabulary size
        self.vocab_size = len(word2id)
        # Number of distinct tags
        self.tag_size = len(tag2id)

        # Embedding layer: token ids -> dense vectors
        self.embedding = nn.Embedding(self.vocab_size, self.embedding_dim)
        # BiLSTM layer: hidden_dim // 2 per direction -> hidden_dim total output
        self.bilstm = nn.LSTM(input_size=self.embedding_dim,
                              hidden_size=self.hidden_dim // 2,
                              bidirectional=True,
                              batch_first=True)
        # Dropout layer. (The original stored the float rate in self.dropout and
        # then immediately overwrote it with the module; only the module is kept.)
        self.dropout = nn.Dropout(dropout)
        # Projection from BiLSTM features to tag-space emission scores
        self.fc = nn.Linear(self.hidden_dim, self.tag_size)
        # CRF layer over the tag set
        self.crf = CRF(self.tag_size)

    def get_lstm2linear(self, input_ids, attention_mask):
        '''
        Compute the emission scores for each token position.

        :param input_ids: LongTensor of token ids, shape (batch, seq_len)
        :param attention_mask: tensor of 1s for real tokens and 0s for padding,
            shape (batch, seq_len)
        :return: emission scores, shape (batch, seq_len, tag_size)
        '''
        # Token ids -> embeddings
        embedding = self.embedding(input_ids)
        # Run the BiLSTM; (h0, c0) are zero-initialized automatically
        bilstm_out, (h_n, c_n) = self.bilstm(embedding)
        # Apply dropout to the BiLSTM output
        bilstm_out = self.dropout(bilstm_out)
        # Zero out the padded positions so they contribute nothing downstream.
        # NOTE(review): self.fc has a bias, so padded rows still end up as the
        # bias vector after the projection; the CRF mask is what actually
        # excludes them from the loss/decoding.
        attention_mask = attention_mask.unsqueeze(-1)
        output = bilstm_out * attention_mask
        # Project to tag space
        result = self.fc(output)

        return result

    def forward(self, input_ids, attention_mask):
        '''
        Predict the best tag sequence (Viterbi path) for each input sequence.

        :param input_ids: LongTensor of token ids, shape (batch, seq_len)
        :param attention_mask: padding mask, 1 for real tokens, 0 for padding
        :return: decoded tag-id sequences from the CRF's Viterbi decoder
        '''
        # Emission scores from the BiLSTM + linear head
        emission_score = self.get_lstm2linear(input_ids, attention_mask)
        # Viterbi decoding. The CRF expects a BoolTensor mask — convert here
        # for consistency with log_likelihood (the original passed the raw mask).
        result = self.crf.viterbi_decode(emission_score, attention_mask.bool())
        return result

    def log_likelihood(self, input_ids, labels, attention_mask):
        '''
        Compute the negative log-likelihood loss of the gold tag sequences.

        :param input_ids: LongTensor of token ids, shape (batch, seq_len)
        :param labels: LongTensor of gold tag ids, shape (batch, seq_len)
        :param attention_mask: padding mask, 1 for real tokens, 0 for padding
        :return: negative CRF log-likelihood (to be minimized)
        '''
        # Emission scores from the BiLSTM + linear head
        emission_score = self.get_lstm2linear(input_ids, attention_mask)
        # CRF returns the log-likelihood; negate it to obtain a loss.
        # The mask must be a BoolTensor.
        loss = -self.crf(emission_score, labels, attention_mask.bool())
        return loss


def test_NERLSTM_CRF():
    """Smoke test: build the model, then run a single batch through both the
    prediction path (forward/Viterbi) and the loss path (log_likelihood)."""
    model = NERLSTM_CRF(conf.embedding_dim, conf.hidden_dim, conf.dropout, word2id, conf.tag2id)
    print(f'ner_lstm_crf--->{model}')
    # Build the data loaders (dev loader is unused here)
    train_dataloader, dev_dataloader = get_data()
    # Process exactly one batch, then stop
    for batch in train_dataloader:
        input_ids_padded, labels_padded, attention_mask = batch
        predictions = model(input_ids_padded, attention_mask)
        print(f'标签预测结果--->{predictions}')
        batch_loss = model.log_likelihood(input_ids_padded, labels_padded, attention_mask)
        print(f'损失--->{batch_loss}')
        break


# Script entry point: run the smoke test when this file is executed directly.
if __name__ == '__main__':
    test_NERLSTM_CRF()