import torch
import torch.nn as nn


class LSTM_CNN(nn.Module):
    """Bi-LSTM encoder followed by a full-sequence 1-D convolution.

    The embedded sentence is run through a bidirectional LSTM (the two
    directional halves of ``hidden_size // 2`` concatenate back to
    ``hidden_size``), layer-normalized, dropped out, and reduced to
    ``tgt_size`` features by a Conv1d whose kernel spans the whole padded
    sequence (``max_len``), collapsing the time axis.
    """

    def __init__(self, embedding_module, emb_dim, num_layers, hidden_size, tgt_size, max_len, keep_prob=0.5, pad_idx=0, use_cuda=True):
        """
        Args:
            embedding_module: callable mapping ``(sentences, tokens, masks)``
                to embeddings of shape ``(batch, max_len, emb_dim)``.
            emb_dim: dimensionality of the input embeddings.
            num_layers: number of stacked LSTM layers.
            hidden_size: total LSTM output size; assumed even, since each
                direction gets ``hidden_size // 2`` units.
            tgt_size: number of output features per sentence.
            max_len: fixed padded sequence length; also the Conv1d kernel size,
                so inputs to ``forward`` must have exactly this length.
            keep_prob: NOTE(review) — despite the name, this value is passed
                directly to ``nn.Dropout`` and ``nn.LSTM(dropout=...)``, which
                both interpret it as the *drop* probability. Kept as-is for
                backward compatibility; confirm intent with callers.
            pad_idx: value used to re-pad the unpacked LSTM output.
            use_cuda: request GPU placement (falls back to CPU if CUDA is
                unavailable).
        """
        super(LSTM_CNN, self).__init__()
        self.embeddings = embedding_module
        self.lstm = nn.LSTM(emb_dim, hidden_size // 2, num_layers, batch_first=True, dropout=keep_prob, bidirectional=True)
        # Kernel covers all max_len steps at once -> output length 1.
        self.conv = nn.Conv1d(hidden_size, tgt_size, kernel_size=max_len, stride=1, padding=0)
        self.layerNorm = nn.LayerNorm(hidden_size)
        # NOTE(review): batchNorm is never used in forward(); kept so that
        # previously saved state_dicts still load without key errors.
        self.batchNorm = nn.BatchNorm1d(hidden_size, eps=1e-8)
        self.dropout = nn.Dropout(keep_prob)
        self.pad_idx = pad_idx
        self.tgt_size = tgt_size
        self.max_len = max_len
        # Fix: fall back to CPU when CUDA was requested but is unavailable,
        # instead of crashing in .to() on CPU-only hosts.
        use_cuda = use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda:0") if use_cuda else torch.device("cpu")
        self.to(self.device)

    def forward(self, sentences, tokens, masks):
        """Encode a batch of padded sentences into per-sentence features.

        Args:
            sentences: ``(batch, max_len)`` token-id tensor.
            tokens: forwarded untouched to the embedding module.
            masks: ``(batch, max_len)`` 0/1 tensor; row sums give the true
                (unpadded) sequence lengths.

        Returns:
            Tensor of shape ``(batch, tgt_size)``.
        """
        batch_size, max_len = sentences.size()
        emb = self.embeddings(sentences, tokens, masks)
        seq_len = masks.sum(dim=1).long()
        # Fix: pack_padded_sequence requires lengths on CPU (errors on CUDA
        # tensors), and enforce_sorted=False lifts the requirement that the
        # batch be pre-sorted by descending length — nothing upstream sorts.
        out = nn.utils.rnn.pack_padded_sequence(emb, seq_len.cpu(), batch_first=True, enforce_sorted=False)
        out, _ = self.lstm(out)
        # total_length pins the re-padded output back to max_len so the conv
        # kernel (kernel_size == max_len) always fits exactly.
        out, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True, padding_value=self.pad_idx, total_length=max_len)
        out = self.layerNorm(out)
        out = self.dropout(out)
        # (batch, max_len, hidden) -> (batch, hidden, max_len) for Conv1d.
        out = out.transpose(-1, -2)
        # Conv output is (batch, tgt_size, 1); squeeze the singleton time dim.
        feats = self.conv(out).squeeze(2)
        return feats
