"""

Our TextCNN model.
[TODO](zbw): complete the TextCNN model.

"""

import torch
import torch.nn as nn
import torch.nn.functional as F

class TextCNN(nn.Module):
    """Convolutional sentence classifier over pre-computed word embeddings.

    Parallel Conv2d filters of heights ``windows`` slide over the embedding
    matrix, each followed by ReLU and max-over-time pooling; the pooled
    features are concatenated and passed through a fully connected head.

    Args:
        embed_dim: dimensionality of the input word embeddings.
        windows: iterable of convolution window heights (n-gram sizes).
        kernel_num: number of filters per window size.
        fc: hidden-layer widths of the fully connected head.
        dropout: dropout probability applied to the pooled feature vector.
        class_num: number of output classes.
    """

    def __init__(self, embed_dim=200, windows=(3, 4, 5), kernel_num=100,
                 fc=(256, 256, 100), dropout=0.5, class_num=4):
        super().__init__()
        self.class_num = class_num
        self.windows = windows
        # NOTE(review): stored for callers' convenience; this module does NOT
        # move itself to the device — callers must still call `.to(device)`.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # BUG FIX: previously a plain Python list, which does not register the
        # conv layers as submodules — their parameters were invisible to
        # .parameters() (never trained), .to(device), and state_dict().
        self.conv = nn.ModuleList(
            nn.Conv2d(1, kernel_num, kernel_size=(h, embed_dim))
            for h in windows
        )

        # Linear widths: concatenated pooled features -> hidden layers -> logits.
        dims = [len(windows) * kernel_num] + list(fc) + [class_num]
        layers = []
        for in_dim, out_dim in zip(dims, dims[1:]):
            layers.extend([
                nn.Linear(in_dim, out_dim),
                nn.ReLU(),
            ])
        layers.pop()  # the last layer outputs logits: no ReLU
        self.fc = nn.Sequential(*layers)
        self.dropout = nn.Dropout(dropout)

    @staticmethod
    def conv_and_pool(x, conv):
        """Convolve, ReLU, then max-pool over the time dimension.

        Args:
        - x: (batch, 1, len, embed_dim) input tensor.
        - conv: an nn.Conv2d whose kernel width equals embed_dim.

        Returns:
        - (batch, kernel_num) pooled feature tensor.
        """
        x = conv(x)                         # (batch, kernel_num, len-h+1, 1)
        x = F.relu(x.squeeze(3))            # (batch, kernel_num, len-h+1)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # (batch, kernel_num)
        return x

    def forward(self, x):
        """
        Compute the forward pass of TextCNN.

        Args:
        - x: (batch, len, w2v_dim) torch tensor; len must be >= max(windows).

        Returns:
        - logit: (batch, class_num) log-probabilities (log_softmax output).
        """
        x = x.unsqueeze(1)  # add channel dim: (batch, 1, len, w2v_dim)
        x = torch.cat([self.conv_and_pool(x, conv) for conv in self.conv], dim=1)
        x = self.dropout(x)
        x = self.fc(x)
        logit = F.log_softmax(x, dim=1)
        return logit

    def loss(self, y, labels):
        """
        Compute the negative log-likelihood loss.

        Args:
        - y: :torch.tensor: (batch, class_num) log-probabilities from forward().
        - labels: :torch.LongTensor: (batch,) the true class indices.

        Returns:
        - loss: :torch.tensor: scalar NLL loss.
        """
        # forward() already applies log_softmax, so nll_loss is the right pairing.
        loss = F.nll_loss(y, labels)
        return loss
    
    
    
    