import torch
import torch.nn as nn
from torchcrf import CRF

class NERLSTM_CRF(nn.Module):
    """BiLSTM-CRF sequence tagger for named entity recognition.

    Pipeline: token ids -> embedding -> bidirectional LSTM -> dropout ->
    linear projection to tag-emission scores -> CRF layer for decoding
    (``forward``) and negative log-likelihood loss (``log_likelihood``).
    """

    def __init__(self, embedding_dim, hidden_dim, dropout, word2id, tag2id):
        """
        Args:
            embedding_dim: size of each word embedding vector.
            hidden_dim: total LSTM output size (split across both directions).
            dropout: dropout probability applied to the LSTM output.
            word2id: word -> index mapping; vocab size is ``len(word2id) + 1``
                (the +1 reserves one extra index, presumably for padding/OOV —
                TODO confirm against the data pipeline).
            tag2id: tag -> index mapping; determines the CRF label count.
        """
        super(NERLSTM_CRF, self).__init__()
        self.name = "BiLSTM_CRF"
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = len(word2id) + 1
        self.tag2id = tag2id
        self.tag_size = len(tag2id)

        self.word_embeds = nn.Embedding(self.vocab_size, self.embedding_dim)
        self.dropout = nn.Dropout(dropout)

        # Each direction gets hidden_dim // 2 units so the concatenated
        # bidirectional output is exactly hidden_dim wide.
        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim // 2,
                            bidirectional=True, batch_first=True)

        self.hidden2tag = nn.Linear(self.hidden_dim, self.tag_size)
        # BUG FIX: the LSTM above is batch_first, so emissions are
        # (batch, seq, tags). pytorch-crf defaults to batch_first=False and
        # would treat the batch axis as the sequence axis, producing silently
        # wrong losses/decodes; the CRF must match the emission layout.
        self.crf = CRF(self.tag_size, batch_first=True)

    def get_lstm2linear(self, x):
        """Compute CRF emission scores from token ids.

        Args:
            x: LongTensor of token indices, shape (batch, seq).

        Returns:
            FloatTensor of emission scores, shape (batch, seq, tag_size).
        """
        out = self.word_embeds(x)
        out, _ = self.lstm(out)
        out = self.dropout(out)
        out = self.hidden2tag(out)
        return out

    def forward(self, x, mask):
        """Decode the best tag path for each sequence.

        The CRF requires a bool mask, and ``crf.decode`` returns a list of
        tag-id lists (one per sequence), not a tensor.

        Args:
            x: LongTensor of token indices, shape (batch, seq).
            mask: padding mask, shape (batch, seq); nonzero = real token.

        Returns:
            list[list[int]]: best tag-id path per sequence.
        """
        out = self.get_lstm2linear(x)
        # Zero out emissions at padded positions; the CRF also receives the
        # mask, so this is belt-and-braces rather than strictly required.
        out = out * mask.unsqueeze(-1)
        out = self.crf.decode(out, mask.bool())
        return out

    def log_likelihood(self, x, tags, mask):
        """Return the mean negative log-likelihood loss for a batch.

        Args:
            x: LongTensor of token indices, shape (batch, seq).
            tags: LongTensor of gold tag indices, shape (batch, seq).
            mask: padding mask, shape (batch, seq); nonzero = real token.

        Returns:
            Scalar tensor: ``-log p(tags | x)`` averaged over the batch
            (``crf(...)`` returns the log-likelihood, hence the negation).
        """
        out = self.get_lstm2linear(x)
        out = out * mask.unsqueeze(-1)
        return -self.crf(out, tags, mask.bool(), reduction='mean')