import torch
import torch.nn as nn


class CNNLayer(nn.Module):
    """A single TextCNN branch: Conv1d -> ReLU -> global max-pool over time.

    The pooling window is sized to the full convolution output length, so
    each branch collapses the time axis to a single feature per channel.
    """

    def __init__(self, in_channels, out_channels, max_len, kernel_size, stride, padding):
        super(CNNLayer, self).__init__()
        # Conv1d output length for an input of length max_len; pooling over
        # the whole of it implements global max-pooling.
        conv_out_len = (max_len + 2 * padding - kernel_size) // stride + 1
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool1d(conv_out_len)

    def forward(self, x):
        """Map x of shape (batch, in_channels, max_len) to (batch, out_channels)."""
        activated = self.relu(self.conv(x))
        pooled = self.pool(activated)  # (batch, out_channels, 1)
        return pooled.squeeze(2)


class LSTM_TextCNN(nn.Module):
    """BiLSTM encoder followed by parallel TextCNN branches for classification.

    Pipeline: embed -> packed BiLSTM -> one CNNLayer per kernel size
    (global max-pool each) -> concat -> linear -> dropout -> logits.

    Args:
        embedding_module: callable module invoked as embeddings(sentences, tokens, masks),
            returning (batch, max_len, emb_dim) embeddings.
        emb_dim: embedding dimension fed to the LSTM.
        num_layers: number of stacked LSTM layers.
        hidden_size: total BiLSTM output size (hidden_size // 2 per direction).
        tgt_size: number of output classes.
        kernel_sizes: iterable of convolution kernel widths, one CNN branch each.
        kernel_nums: number of output channels per CNN branch.
        max_len: padded sequence length.
        keep_prob: probability of *keeping* a unit (drop prob is 1 - keep_prob).
        pad_idx: padding value used when re-padding the LSTM output.
        use_cuda: place the module on cuda:0 when True, else CPU.
    """

    def __init__(self, embedding_module, emb_dim, num_layers, hidden_size, tgt_size, kernel_sizes, kernel_nums, max_len, keep_prob=0.5, pad_idx=0, use_cuda=True):
        super(LSTM_TextCNN, self).__init__()
        # BUG FIX: keep_prob is a keep probability, but nn.Dropout / nn.LSTM
        # expect a drop probability — convert instead of passing it through.
        dropout_p = 1.0 - keep_prob
        self.embeddings = embedding_module
        self.lstm = nn.LSTM(emb_dim, hidden_size // 2, num_layers, batch_first=True, dropout=dropout_p, bidirectional=True)
        self.layers = nn.ModuleList([CNNLayer(hidden_size, kernel_nums, max_len, kernel_size, 1, 0) for kernel_size in kernel_sizes])
        self.linear = nn.Linear(kernel_nums * len(kernel_sizes), tgt_size)
        self.dropout = nn.Dropout(dropout_p)
        self.pad_idx = pad_idx
        self.max_len = max_len
        self.tgt_size = tgt_size
        self.device = torch.device("cuda:0") if use_cuda else torch.device("cpu:0")
        self.to(self.device)

    def forward(self, sentences, tokens, masks):  # for train
        """Return class logits of shape (batch_size, tgt_size).

        Args:
            sentences: (batch, max_len) token id tensor.
            tokens, masks: forwarded to the embedding module; masks is also
                used as a 0/1 validity matrix to derive sequence lengths.
        """
        batch_size, max_len = sentences.size()
        emb = self.embeddings(sentences, tokens, masks)  # (batch, max_len, emb_dim)
        # Lengths must be on CPU for pack_padded_sequence.
        seq_len = masks.sum(dim=1).long().cpu()
        # BUG FIX: enforce_sorted=False — without it, any batch not pre-sorted
        # by descending length raises a RuntimeError.
        out = nn.utils.rnn.pack_padded_sequence(emb, seq_len, batch_first=True, enforce_sorted=False)
        out, _ = self.lstm(out)
        out, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True, padding_value=self.pad_idx, total_length=max_len)
        out = out.transpose(-1, -2)  # (batch, hidden_size, max_len) for Conv1d
        pooled_out = [conv(out) for conv in self.layers]  # each (batch, kernel_nums)
        pooled_out = torch.cat(pooled_out, dim=-1)
        feats = self.linear(pooled_out)
        # NOTE(review): dropout is applied to the logits, after the linear
        # layer — unusual order (typically applied before), kept as-is to
        # preserve the original behavior; confirm intent with the author.
        feats = self.dropout(feats)  # (batch_size, tgt_size)
        return feats
