import torch
import torch.nn.functional as F
from torch import nn
from dataset import TEXT, vocab_size
from configs import BasicConfigs
from model.model_parts import GlobalMaxPool1d

# Shared hyperparameter container (kernel sizes, channel counts, ...),
# instantiated once at import time so the class defaults below can read it.
bc = BasicConfigs()


class TextCNN(nn.Module):
    """Text-classification CNN with two embedding channels.

    One embedding channel is fine-tuned during training and the other is
    frozen; both are initialized from the pretrained vectors in
    ``TEXT.vocab.vectors``. Multiple 1-D convolutions with different kernel
    widths are applied over the token sequence, globally max-pooled,
    concatenated, and projected to class scores.

    Args:
        kernel_sizes: kernel width of each 1-D convolution (one conv per entry).
        num_channels: output channels of each conv; parallel to ``kernel_sizes``.
        embed_size: dimensionality of each embedding channel.
        num_classes: size of the output layer (default 2, matching the
            original binary-classification head).
        dropout: dropout probability applied before the final linear layer
            (default 0.5, matching the original hard-coded value).
    """

    def __init__(self, kernel_sizes=bc.kernel_sizes, num_channels=bc.num_channels,
                 embed_size=vocab_size[1], num_classes=2, dropout=0.5):
        super(TextCNN, self).__init__()
        # Trainable embedding channel, initialized from pretrained vectors.
        self.embedding = nn.Embedding(vocab_size[0], embed_size)
        self.embedding.weight.data.copy_(TEXT.vocab.vectors)
        # Frozen embedding channel: same initialization, gradients disabled.
        self.constant_embedding = nn.Embedding(vocab_size[0], embed_size)
        self.constant_embedding.weight.data.copy_(TEXT.vocab.vectors)
        self.constant_embedding.weight.requires_grad = False
        self.dropout = nn.Dropout(dropout)
        self.decoder = nn.Linear(sum(num_channels), num_classes)
        self.pool = GlobalMaxPool1d()
        # One 1-D convolution per kernel size. Input channels are the two
        # embedding channels concatenated along the feature dimension.
        self.convs = nn.ModuleList()
        for out_ch, k in zip(num_channels, kernel_sizes):
            self.convs.append(nn.Conv1d(in_channels=2 * embed_size,
                                        out_channels=out_ch,
                                        kernel_size=k))

    def forward(self, inputs):
        """Compute class scores for a batch of token-index sequences.

        Returns:
            Tensor of shape ``(batch, num_classes)``.
        """
        # Concatenate the two embedding channels along the feature dimension.
        # NOTE(review): the permute(1, 2, 0) below is only correct if
        # ``inputs`` is (seq_len, batch) — torchtext's batch-second layout —
        # making this tensor (seq_len, batch, 2*embed_size). The original
        # comment claimed (batch, seq_len, ...), which contradicts the
        # permute; confirm the layout against the dataset iterator.
        embeddings = torch.cat((
            self.embedding(inputs),
            self.constant_embedding(inputs)), dim=2)
        # Conv1d expects (batch, channels, length): move the feature dim to
        # the channel position and seq_len to the last axis.
        embeddings = embeddings.permute(1, 2, 0)
        # Each conv + global max pool yields (batch, channels, 1); squeeze
        # the trailing dim and concatenate along the channel dimension.
        encoding = torch.cat(
            [self.pool(F.relu(conv(embeddings))).squeeze(-1)
             for conv in self.convs],
            dim=1)
        # Apply dropout, then project to class scores.
        return self.decoder(self.dropout(encoding))