# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
from TorchCRF import CRF

import numpy as np


class LSTM_CRF(nn.Module):
    """BiLSTM-CRF sequence tagger with fixed sinusoidal positional encodings.

    Pipeline: embedding + positional encoding -> dropout -> LayerNorm ->
    bidirectional LSTM -> linear projection to tag space -> CRF
    (neg. log-likelihood for training, Viterbi decoding for inference).
    """

    def __init__(self,
                 vocab_size: int,
                 tgt_size: int,
                 emb_dim: int,
                 max_len: int,
                 hidden_size: int,
                 num_layers: int = 2,
                 keep_prob: float = 0.5,
                 pad_idx: int = 0,
                 use_cuda: bool = True):
        """
        Args:
            vocab_size: size of the input vocabulary.
            tgt_size: number of target tags (CRF states).
            emb_dim: embedding dimension.
            max_len: maximum sequence length (rows of the positional table).
            hidden_size: total LSTM hidden size; split across the two directions.
            num_layers: number of stacked LSTM layers.
            keep_prob: NOTE(review) — despite the name, this is passed straight
                to ``nn.LSTM(dropout=...)``, i.e. it is the *dropout* (zeroing)
                probability, not a keep probability. Name kept for
                backward compatibility with existing callers.
            pad_idx: padding index shared by the embedding layer and the CRF.
            use_cuda: if True, place the model and positional table on cuda:0.
        """
        super(LSTM_CRF, self).__init__()
        self.device = torch.device("cuda:0" if use_cuda else "cpu")
        self.embeddings = nn.Embedding(vocab_size, emb_dim, padding_idx=pad_idx)
        # Fixed (non-trainable) sinusoidal positional encodings.
        self.pe = LSTM_CRF.positional_encoding(max_len, emb_dim, self.device)
        # Embedding dropout was hard-coded to 0.5 originally; kept for
        # backward compatibility (it is independent of keep_prob).
        self.emb_dropout = nn.Dropout(0.5)
        self.layerNorm = nn.LayerNorm(emb_dim)
        # hidden_size is the concatenated size of both directions, hence // 2.
        self.lstm = nn.LSTM(emb_dim, hidden_size // 2, num_layers,
                            batch_first=True, dropout=keep_prob,
                            bidirectional=True)
        self.linear = nn.Linear(hidden_size, tgt_size, bias=True)
        self.crf = CRF(tgt_size, pad_idx, use_gpu=use_cuda)

        self.pad_idx = pad_idx
        self.max_len = max_len

        self.to(self.device)

    @staticmethod
    def positional_encoding(max_len, emb_dim, device):
        """Build the (max_len, emb_dim) sinusoidal positional-encoding table.

        Bug fix: the table must be a *float* tensor. The previous
        ``torch.LongTensor(max_len, emb_dim)`` silently truncated every
        sin/cos value to an integer (mostly 0), destroying the encoding.
        Also generalized to odd ``emb_dim`` (cos has one fewer column then).
        """
        pe = torch.zeros(max_len, emb_dim)  # float32 — NOT a LongTensor
        pos = torch.arange(0, max_len, 1.0).unsqueeze(1)  # (max_len, 1)
        # Inverse frequencies: 10000^(-2i/emb_dim) for even dimensions i.
        k = torch.exp(-np.log(10000) * torch.arange(0, emb_dim, 2.) / emb_dim)
        pe[:, 0::2] = torch.sin(pos * k)
        pe[:, 1::2] = torch.cos(pos * k)[:, : emb_dim // 2]
        return pe.to(device)

    def _encode(self, sent, masks):
        """Shared encoder: tokens -> per-token tag scores (emissions).

        Args:
            sent: (batch, max_len) LongTensor of token ids.
            masks: (batch, max_len) mask; row-sums give true sequence lengths.
        Returns:
            (batch, max_len, tgt_size) emission scores for the CRF.
        """
        _, max_len = sent.size()
        # pack_padded_sequence requires a 1D CPU int64 lengths tensor.
        seq_len = torch.sum(masks, dim=1).long().cpu()

        # Embedding layer: token embedding + positional encoding.
        emb = self.embeddings(sent)
        emb = emb + self.pe[:max_len]
        emb = self.emb_dropout(emb)
        emb = self.layerNorm(emb)

        # LSTM layer over the packed (padding-free) sequence.
        # enforce_sorted=False: callers need not pre-sort batches by length.
        out = nn.utils.rnn.pack_padded_sequence(emb, seq_len, batch_first=True,
                                                enforce_sorted=False)
        out, _ = self.lstm(out)
        out, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True,
                                                  padding_value=self.pad_idx,
                                                  total_length=max_len)
        return self.linear(out)

    def forward(self, sent, tags, masks):
        """Training objective: mean negative log-likelihood under the CRF.

        The CRF returns one log-likelihood per sequence; reduce to a scalar
        mean so loss.backward() works ("grad can be implicitly created only
        for scalar outputs").
        """
        out = self._encode(sent, masks)
        llh = self.crf(out, tags, masks)
        return -llh.mean()

    def decode(self, sent, masks):
        """Inference: Viterbi-decode the best tag sequence for each sentence."""
        out = self._encode(sent, masks)
        return self.crf.viterbi_decode(out, masks)


