# coding: utf-8
# Author: Jockey Yan

from .BasicModule import BasicModule
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy

# NOTE(review): module-level side effect — importing this file fixes the
# global torch RNG seed for the whole process; consider moving seeding into
# the training entry point instead.
torch.manual_seed(1)


class TextCNN(BasicModule):
    """TextCNN text classifier (Kim, 2014 style): parallel Conv2d filters of
    several kernel sizes over word embeddings, max-over-time pooling, then a
    two-layer fully-connected head emitting log-probabilities."""

    def __init__(self, opt):
        """
        Initialize the network.
        :param opt: config option object; must provide VOCAB_SIZE,
            EMBEDDING_DIM, TITLE_DIM (conv output channels),
            KERNEL_SIZE (iterable of ints) and NUM_CLASSES.
        """
        super(TextCNN, self).__init__()
        self.model_name = "TextCNN"
        self.opt = opt

        hidden_dim = 256  # width of the intermediate fully-connected layer

        self.embed = nn.Embedding(opt.VOCAB_SIZE, opt.EMBEDDING_DIM)
        # One conv per kernel size; each filter spans the full embedding
        # width, so it slides only along the sequence dimension.
        self.conv = nn.ModuleList(
            [nn.Conv2d(1, opt.TITLE_DIM, (K, opt.EMBEDDING_DIM)) for K in opt.KERNEL_SIZE]
        )

        self.dropout = nn.Dropout(0.5)
        self.fc1 = nn.Linear(len(opt.KERNEL_SIZE) * opt.TITLE_DIM, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, opt.NUM_CLASSES)

    def forward(self, input_x):
        """
        Run a forward pass.
        :param input_x: LongTensor of token ids, shape [batch_size, max_seq_len]
        :return: log-probabilities, shape [batch_size, num_classes]
        """
        x = self.embed(input_x)  # [batch_size, max_seq_len, emb_size]
        x = x.unsqueeze(1)       # add channel dim: [batch_size, 1, max_seq_len, emb_size]

        # Each conv collapses the embedding dimension; after squeeze(3):
        # [batch_size, TITLE_DIM, max_seq_len - K + 1]
        x = [F.relu(conv(x)).squeeze(3) for conv in self.conv]
        x = [self.dropout(xi) for xi in x]  # regularize conv features
        # Max-over-time pooling -> [batch_size, TITLE_DIM] per kernel size.
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]
        x = torch.cat(x, 1)  # [batch_size, len(KERNEL_SIZE) * TITLE_DIM]

        x = self.dropout(F.relu(self.fc1(x)))
        # FIX: log_softmax without an explicit `dim` is deprecated (implicit
        # dim inference); normalize over the class axis explicitly.
        logit = F.log_softmax(self.fc2(x), dim=1)  # [batch_size, num_classes]

        return logit

