import torch
from torch import nn
import torch.nn.functional as F

class textCNN(nn.Module):
    """TextCNN sentence classifier (Kim-2014 style) with multi-channel embeddings.

    Config attributes read:
        pre_word_vec (bool): if True, build two embedding channels from
            TEXT.vocab.vectors — one frozen, one fine-tuned; otherwise a
            single trainable embedding is used.
        word_vec_num (int): embedding dimension E (must match the pretrained
            vectors when pre_word_vec is True).
        dropout (float): dropout probability applied before the classifier.
        class_num (int): number of output classes.
        seq_len (int, optional): fixed input sequence length W; defaults to
            720 for backward compatibility with the original hard-coded value.
    """

    def __init__(self, config, TEXT):
        super(textCNN, self).__init__()
        self.config = config
        conv_num = [128, 64, 64, 32, 16]   # out-channels of each conv branch
        kernel_size = [3, 4, 5, 16, 32]    # conv window heights (n-gram sizes)
        # Sequence length used to be hard-coded to 720; keep that default but
        # allow config.seq_len to override it.
        seq_len = getattr(config, 'seq_len', 720)
        # A conv of height k over a length-W input yields a map of height
        # W - k + 1; pooling over that whole height is global max pooling.
        self.pooling_kernel = [(seq_len - size + 1) for size in kernel_size]
        # layers
        if config.pre_word_vec:
            self.embed_stable = nn.Embedding.from_pretrained(TEXT.vocab.vectors, freeze=True)
            self.embed_unstable = nn.Embedding.from_pretrained(TEXT.vocab.vectors, freeze=False)
            self.embed = nn.ModuleList([self.embed_stable, self.embed_unstable])
        else:
            self.embed = nn.ModuleList([nn.Embedding(len(TEXT.vocab), config.word_vec_num)])
        self.dropout = nn.Dropout(p=config.dropout)
        # BUG FIX: in_channels was hard-coded to 2, which crashed the
        # non-pretrained branch (only one embedding channel reaches the
        # convs). Derive the channel count from the embedding list instead.
        in_channels = len(self.embed)
        self.conv = nn.ModuleList([nn.Conv2d(in_channels=in_channels, out_channels=conv_num[i],
                                    kernel_size=(kernel_size[i], config.word_vec_num)) for i in range(len(kernel_size))])
        self.batchnorm = nn.ModuleList([nn.BatchNorm2d(conv_num[i]) for i in range(len(kernel_size))])
        self.fully_connect = nn.Linear(sum(conv_num), config.class_num)

    def forward(self, x):
        """Classify token-id sequences.

        Args:
            x: LongTensor of token ids, shape (batch, W) with W == seq_len.
        Returns:
            FloatTensor of logits, shape (batch, class_num).
        """
        # One (batch, 1, W, E) tensor per embedding channel, concatenated
        # on dim 1 -> batch * C * W * E.
        x_list = [embed(x).unsqueeze(1) for embed in self.embed]
        x = torch.cat(x_list, dim=1)
        x = [conv(x) for conv in self.conv]                  # each: batch * conv_num * H * 1
        x = [self.batchnorm[i](x[i]) for i in range(len(x))]
        x = [F.relu(x[i]) for i in range(len(x))]
        # Global max pooling over the time axis -> batch * conv_num * 1.
        x = [F.max_pool1d(conv_res.squeeze(3), kernel_size=self.pooling_kernel[i])
             for i, conv_res in enumerate(x)]
        x = torch.cat(x, 1)                                  # batch * sum(conv_num) * 1
        x = self.dropout(x)
        return self.fully_connect(x.squeeze(2))              # batch * class_num

    def weight_init(self):
        """Kaiming-initialize conv and FC weights; zero all biases."""
        with torch.no_grad():
            for conv in self.conv:
                nn.init.kaiming_normal_(conv.weight)
                nn.init.constant_(conv.bias, 0.0)
            nn.init.kaiming_normal_(self.fully_connect.weight)
            nn.init.constant_(self.fully_connect.bias, 0.0)
