# coding: utf-8
# Author: Miracle Yoo

from .BasicModule import BasicModule
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy

torch.manual_seed(1)


class TextCNNDeep(BasicModule):
    """DPCNN-style text classifier with a TextCNN region-embedding front end.

    Pipeline: token embedding -> per-kernel-size, length-preserving
    convolutions (region embedding) -> two conv3x3 layers with a 1x1
    shortcut projection -> one pyramid block (max-pool + two conv3x3 +
    residual) -> fully connected classifier head.

    All convolution layers are created once in ``__init__`` so that their
    parameters are registered with the module and actually trained.  (The
    previous revision built fresh ``nn.Conv1d``/``nn.Conv2d`` layers inside
    ``forward``, which meant random, untrained weights on every call.)
    """

    def __init__(self, opt):
        """
        Initialize the model.

        :param opt: config option object; must provide VOCAB_SIZE,
            EMBEDDING_DIM, TITLE_DIM, KERNEL_SIZE (iterable of ints),
            NUM_ID_FEATURE_MAP, NUM_CLASSES, USE_CUDA and, ideally,
            SENT_LEN (max sequence length; used to size the FC input).
        """
        super(TextCNNDeep, self).__init__()
        self.model_name = "TextCNNDeep"
        self.opt = opt

        self.embed = nn.Embedding(opt.VOCAB_SIZE, opt.EMBEDDING_DIM)
        # Kept (although unused by forward) so existing checkpoints retain
        # their state_dict keys.
        self.tcnn_conv = nn.ModuleList([
            nn.Conv2d(1, opt.TITLE_DIM, (K, opt.EMBEDDING_DIM))
            for K in opt.KERNEL_SIZE])  # For TextCNN
        self.dropout = nn.Dropout(0.5)

        # 1. Region embedding: one "same-length" conv branch per kernel
        # size.  Asymmetric zero padding keeps the output sequence length
        # equal to the input length for even kernel sizes as well.
        self.region_pads = nn.ModuleList()
        self.region_convs = nn.ModuleList()
        for k in opt.KERNEL_SIZE:
            if k % 2 == 0:
                pad = nn.ZeroPad2d((0, 0, k // 2, k // 2 - 1))
            else:
                pad = nn.ZeroPad2d((0, 0, (k - 1) // 2, (k - 1) // 2))
            self.region_pads.append(pad)
            self.region_convs.append(nn.Sequential(
                nn.Conv2d(in_channels=1, out_channels=opt.TITLE_DIM,
                          kernel_size=(k, opt.EMBEDDING_DIM), stride=1),
                nn.BatchNorm2d(opt.TITLE_DIM),
                nn.ReLU(inplace=True),
            ))

        region_channels = opt.TITLE_DIM * len(opt.KERNEL_SIZE)
        fmap = opt.NUM_ID_FEATURE_MAP

        # 2. Two conv3x3 layers plus a 1x1 shortcut projection
        # (registered here, NOT created per forward pass).
        self.shortcut_conv = self.conv1x1(region_channels, fmap, padding=0)
        self.conv_a = self.conv3x3(region_channels, fmap)
        self.conv_b = self.conv3x3(fmap, fmap)

        # 3. One pyramid block (see _block).
        self.block_shortcut = self.conv1x1(fmap, fmap, padding=0)
        self.block_conv_a = self.conv3x3(fmap, fmap)
        self.block_conv_b = self.conv3x3(fmap, fmap)
        self.pool = nn.MaxPool1d(kernel_size=3, stride=2)

        # NOTE(review): the batch-norm and activation are shared between the
        # conv layers, as in the original implementation; one BatchNorm1d
        # instance per conv would be more conventional — confirm before
        # changing, as it alters the state_dict.
        self.standard_batchnm = nn.BatchNorm1d(num_features=fmap)
        self.standard_act_fun = nn.ReLU(inplace=True)

        # 4. FC head.  Input size = fmap * pooled sequence length after one
        # MaxPool1d(kernel_size=3, stride=2).  Falls back to the historic
        # hard-coded 12250 (= 250 * 49) when SENT_LEN is unavailable.
        sent_len = getattr(opt, "SENT_LEN", None)
        fc_in = fmap * ((sent_len - 3) // 2 + 1) if sent_len else 12250
        self.fc = nn.Sequential(
            nn.Linear(fc_in, 3 * fmap),
            nn.BatchNorm1d(3 * fmap),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(3 * fmap, opt.NUM_CLASSES)
        )

    def forward(self, input_x):
        """
        Compute class scores for a batch of token-id sequences.

        :param input_x: LongTensor of token ids, [batch_size, max_seq_len]
        :return: unnormalized class scores, [batch_size, NUM_CLASSES]
        """
        # 0. Embedding
        x = self.embed(input_x)     # [batch, seq_len, emb]
        x = x.unsqueeze(1)          # [batch, 1, seq_len, emb]

        # 1. Region embedding: run every per-kernel-size branch and
        # concatenate along the channel dimension.
        x = [conv(pad(x)).squeeze(3)
             for pad, conv in zip(self.region_pads, self.region_convs)]
        x = torch.cat(x, dim=1)     # [batch, title_dim * n_kernels, seq_len]
        x = self.dropout(x)

        # 2. Two conv3x3 layers with a projected residual connection.
        shortcut = self.shortcut_conv(x)
        x = self.standard_act_fun(self.standard_batchnm(self.conv_a(x)))
        x = self.standard_act_fun(self.standard_batchnm(self.conv_b(x)))
        x = x + shortcut            # [batch, fmap, seq_len]

        # 3. Pyramid block (downsamples the sequence once).
        x = self._block(x)

        # 4. Fully connected classifier head.
        x = x.view(x.size(0), -1)   # [batch, fmap * pooled_len]
        return self.fc(x)

    def _block(self, x):
        """One DPCNN block: downsample, two conv3x3, residual add.

        :param x: [batch, fmap, seq_len]
        :return: [batch, fmap, (seq_len - 3) // 2 + 1]
        """
        x = self.pool(x)            # roughly halves the sequence length
        shortcut = self.block_shortcut(x)

        x = self.block_conv_a(x)
        x = self.standard_batchnm(x)
        x = self.dropout(x)         # Fine-tune
        x = self.standard_act_fun(x)

        x = self.block_conv_b(x)
        x = self.standard_batchnm(x)
        x = self.dropout(x)         # Fine-tune
        x = self.standard_act_fun(x)

        return x + shortcut

    def conv_same_as(self, x, kernel_size=2):
        """Length-preserving convolution over [batch, 1, seq_len, emb].

        .. deprecated:: retained only for backward compatibility.  It builds
           fresh (randomly initialized, untrained) layers on every call; the
           registered ``region_pads``/``region_convs`` modules replace it in
           ``forward``.

        :return: [batch, title_dim, seq_len]
        """
        if kernel_size % 2 == 0:
            pad_op = nn.ZeroPad2d((0, 0, kernel_size // 2, kernel_size // 2 - 1))
        else:
            pad_op = nn.ZeroPad2d((0, 0, (kernel_size - 1) // 2, (kernel_size - 1) // 2))

        conv_op = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=self.opt.TITLE_DIM,
                      kernel_size=(kernel_size, self.opt.EMBEDDING_DIM), stride=1),
            nn.BatchNorm2d(self.opt.TITLE_DIM),
            nn.ReLU(inplace=True)
        )
        if self.opt.USE_CUDA:
            conv_op = conv_op.cuda()

        return conv_op(pad_op(x)).squeeze(3)

    def conv3x3(self, in_channels, out_channels, stride=1, padding=1):
        """Length-preserving 1-D convolution with kernel size 3."""
        conv = nn.Conv1d(in_channels, out_channels, kernel_size=3,
                         stride=stride, padding=padding, bias=False)
        return conv.cuda() if self.opt.USE_CUDA else conv

    def conv1x1(self, in_channels, out_channels, stride=1, padding=1):
        """Kernel-size-1 1-D convolution (channel projection)."""
        conv = nn.Conv1d(in_channels, out_channels, kernel_size=1,
                         stride=stride, padding=padding, bias=False)
        return conv.cuda() if self.opt.USE_CUDA else conv

