# coding:utf-8
import torch
import torch.nn as nn
from TorchCRF import CRF
from data_loader import *


class BiLSTM_CRF(nn.Module):
    """Bidirectional LSTM encoder with a CRF output layer for sequence labeling.

    Token ids are embedded, encoded by a BiLSTM whose two directions are sized
    so their concatenation is exactly ``hidden_dim`` wide, projected to
    per-token tag scores (emissions), and decoded / scored by a linear-chain
    CRF.
    """

    def __init__(self, embedding_dim, hidden_dim, dropout, word2id, tag2id):
        """Build the model.

        Args:
            embedding_dim: size of the word embedding vectors.
            hidden_dim: total BiLSTM output width; must be even because it is
                split evenly between the forward and backward directions.
            dropout: dropout probability applied to the LSTM outputs.
            word2id: vocabulary mapping; only its length is used.
            tag2id: tag mapping; only its length is used.

        Raises:
            ValueError: if ``hidden_dim`` is odd (the Linear layer would then
                expect a different width than the BiLSTM produces).
        """
        super().__init__()
        if hidden_dim % 2 != 0:
            raise ValueError("hidden_dim must be even for a bidirectional LSTM")
        self.name = "BiLSTM_CRF"
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = len(word2id)
        self.tag_size = len(tag2id)

        self.word_embedding = nn.Embedding(self.vocab_size, self.embedding_dim)
        # Each direction gets hidden_dim // 2 units so the concatenated
        # bidirectional output is exactly hidden_dim wide.
        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim // 2,
                            bidirectional=True, batch_first=True)
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(self.hidden_dim, self.tag_size)
        self.crf = CRF(self.tag_size)

    def forward(self, x, mask):
        """Decode the most likely tag sequence for each batch element.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len).
            mask: attention mask, shape (batch, seq_len); nonzero = real token.

        Returns:
            Viterbi-decoded tag id sequences from the CRF layer.
        """
        out = self.lstm2linear(x)
        # Zero out emissions at padded positions.
        out = out * mask.unsqueeze(-1)
        # TorchCRF requires a boolean mask; the incoming attention mask is
        # numeric (it is used as a multiplicand above), so convert explicitly.
        # .bool() is a no-op for masks that are already boolean.
        outputs = self.crf.viterbi_decode(out, mask.bool())
        return outputs

    def log_likelihood(self, x, tag, mask):
        """Return the CRF negative log-likelihood of ``tag`` given ``x``.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len).
            tag: LongTensor of gold tag ids, shape (batch, seq_len).
            mask: attention mask, shape (batch, seq_len); nonzero = real token.

        Returns:
            Per-sample negative log-likelihood (the CRF returns the
            log-likelihood, which is negated here to serve as a loss).
        """
        out = self.lstm2linear(x)
        out = out * mask.unsqueeze(-1)
        # Same boolean-mask requirement as in forward().
        outputs = - self.crf(out, tag, mask.bool())
        return outputs

    def lstm2linear(self, x):
        """Embed token ids and produce per-token tag emission scores.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len).

        Returns:
            FloatTensor of emissions, shape (batch, seq_len, tag_size).
        """
        embedding = self.word_embedding(x)
        outputs, _ = self.lstm(embedding)
        outputs = self.dropout(outputs)
        return self.out(outputs)


if __name__ == '__main__':
    # Smoke test: build the model, run one training batch through decoding
    # and the loss, print both, and stop.
    model = BiLSTM_CRF(config.embedding_dim, config.hidden_dim, config.dropout, word2id, config.tag2id)
    train_loader, dev_loader = get_data()
    for batch_ids, batch_tags, batch_mask in train_loader:
        decoded = model(batch_ids, batch_mask)
        print(decoded)
        nll = model.log_likelihood(batch_ids, batch_tags, batch_mask)
        print(nll)
        break