# -*- coding: utf-8 -*-

import torch
import torch.nn as nn


class LSTM_Softmax(nn.Module):
    """Bidirectional LSTM encoder followed by a linear projection to logits.

    The input is embedded by an external ``embedding_module``, packed
    according to per-example lengths derived from ``masks``, run through a
    bi-LSTM, and the tanh-squashed output at each sequence's last valid time
    step is projected to ``tgt_size`` logits.
    """

    def __init__(self,
                 embedding_module: nn.Module,
                 tgt_size: int,
                 emb_dim: int,
                 max_len: int,
                 hidden_size: int,
                 num_layers: int = 2,
                 keep_prob: float = 0.5,
                 pad_idx: int = 0,
                 use_cuda: bool = True):
        """
        Args:
            embedding_module: module mapping ``(entities, tokens, masks)`` to a
                ``(batch, seq, emb_dim)`` embedding tensor.
            tgt_size: number of output classes.
            emb_dim: dimensionality of the embeddings fed to the LSTM.
            max_len: maximum sequence length (stored; outputs are re-padded to
                the per-batch length in ``forward``).
            hidden_size: total hidden size; each LSTM direction gets half.
            num_layers: number of stacked LSTM layers.
            keep_prob: probability of *keeping* a unit (TF convention);
                converted to the drop probability expected by ``nn.LSTM``.
            pad_idx: fill value used when re-padding LSTM outputs.
            use_cuda: place the module on ``cuda:0`` when True, else CPU.
        """
        super(LSTM_Softmax, self).__init__()
        self.device = torch.device("cuda:0") if use_cuda else torch.device("cpu")
        self.pad_idx = pad_idx
        self.embeddings = embedding_module
        # BUG FIX: nn.LSTM's ``dropout`` is a *drop* probability, while this
        # parameter is named as a keep probability — convert instead of
        # passing it through unchanged (they only coincide at 0.5).
        self.lstm = nn.LSTM(emb_dim, hidden_size // 2, num_layers,
                            batch_first=True, dropout=1.0 - keep_prob,
                            bidirectional=True)
        self.linear = nn.Linear(hidden_size, tgt_size, bias=True)
        self.max_len = max_len
        self.to(self.device)

    def forward(self, entities, tokens, masks):
        """Return ``(batch, tgt_size)`` logits taken at each sequence's last
        valid time step.

        Args:
            entities: ``(batch, seq)`` id tensor; its shape fixes the batch
                size and padded length.
            tokens: passed through to the embedding module.
            masks: ``(batch, seq)`` 1/0 validity mask; row sums give lengths.
        """
        batch_size, max_len = entities.size()
        seq_len = masks.sum(dim=1).long()
        emb = self.embeddings(entities, tokens, masks)
        # BUG FIX: lengths must be a CPU tensor for pack_padded_sequence
        # (torch >= 1.1) — ``seq_len`` lives on the model device otherwise.
        # enforce_sorted=False avoids requiring length-sorted batches.
        out = nn.utils.rnn.pack_padded_sequence(
            emb, seq_len.cpu(), batch_first=True, enforce_sorted=False)
        out, _ = self.lstm(out)
        out, _ = nn.utils.rnn.pad_packed_sequence(
            out, batch_first=True, padding_value=self.pad_idx,
            total_length=max_len)
        out = torch.tanh(out)
        seq_ends = seq_len - 1  # index of last valid step, (batch_size,)
        # NOTE(review): for the backward direction, the state at ``seq_ends``
        # has read only the final token (its "last" output is at index 0) —
        # confirm this asymmetry is intended before relying on it.
        out = self.linear(out[torch.arange(batch_size), seq_ends, :])
        return out
