import torch.nn.functional
from basenet import BaseNet
from torch import nn
import torch.nn.functional as F
from config import Config


class TextCNN(BaseNet):
    def __init__(self, vocab_size, embedding_dim, num_filters, kernel_size, dropout_keep_prob):
        """
        Text classification CNN (Kim 2014 style): embed, convolve with several
        filter heights, global-max-pool each, concatenate, and classify into 2 classes.

        :param vocab_size: vocabulary size
        :param embedding_dim: dimensionality of the learned word embeddings
        :param num_filters: number of convolution filters per kernel size
        :param kernel_size: iterable of filter heights (number of words each filter spans)
        :param dropout_keep_prob: probability of KEEPING a unit (TensorFlow-style)
        """
        super(TextCNN, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        # One Conv2d per filter height. Each filter spans the full embedding
        # width, so the width dimension collapses to 1 after convolution.
        self.convs = nn.ModuleList([nn.Conv2d(
            in_channels=1,
            out_channels=num_filters,
            kernel_size=(length, embedding_dim)
        ) for length in kernel_size])
        # BUG FIX: nn.Dropout takes the probability of DROPPING a unit, but the
        # parameter here is a keep-probability — convert before passing it on.
        self.dropout = nn.Dropout(1 - dropout_keep_prob)
        self.fc = nn.Linear(num_filters * len(kernel_size), 2)

    def forward(self, batch_input):
        """
        :param batch_input: token-id tensor, shape (batch_size, max_sen_len)
        :return: class logits, shape (batch_size, 2)
        """
        output = self.embeddings(batch_input.long())  # (batch, max_sen_len, embedding_dim)
        output = output.unsqueeze(1)  # (batch, 1, max_sen_len, embedding_dim) — single input channel
        # For each kernel size: convolve, ReLU, then global max-pool over time.
        pooled = []
        for conv in self.convs:
            # Width dim is 1 after conv; time dim shrinks to max_sen_len - k + 1.
            feature = F.relu(conv(output).squeeze(3))  # (batch, num_filters, max_sen_len - k + 1)
            pooled.append(F.max_pool1d(feature, feature.shape[2]).squeeze(2))  # (batch, num_filters)
        output = torch.cat(pooled, 1)  # (batch, num_filters * len(kernel_size))
        output = self.dropout(output)
        output = self.fc(output)  # (batch, 2)
        return output
