"""CNN models for sentence classification (1D, 2D, and deep variants)."""
import random

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable

from common.configs.tools import seed_num

# Seed torch and `random` once at import time so weight initialisation
# (and any python-level randomness) is reproducible across runs.
torch.manual_seed(seed_num)
random.seed(seed_num)


class TextCNN1d(nn.Module):
    """A 1D convolutional neural network for sentence classification.

    Embeds token ids, runs parallel ``nn.Conv1d`` filters of several widths
    over the time axis, max-pools each feature map, and classifies the
    concatenated pooled features with a single fully-connected layer.
    """

    def __init__(self, args):
        """Build the embedding, convolution, and output layers.

        Args:
            args: Configuration namespace providing:
                word_Embedding (bool): If True, initialise the embedding
                    table from ``args.pretrained_weight``.
                pretrained_weight (torch.Tensor): Pretrained embeddings of
                    shape (vocab_size, embed_dim); only read when
                    ``word_Embedding`` is True.
                embed_num (int): Vocabulary size, used when training
                    embeddings from scratch.
                embed_dim (int): Dimension of word vectors.
                kernel_sizes (List[int]): Convolution filter widths.
                kernel_num (int): Number of filters per width.
                class_num (int): Number of output classes.
                dropout (float): Dropout rate applied before the classifier.
        """
        super(TextCNN1d, self).__init__()
        self.args = args

        # freeze=False lets pretrained vectors be fine-tuned during training.
        freeze_embedding = False
        self.embed_dim = self.args.embed_dim
        filter_sizes = self.args.kernel_sizes
        num_filters = self.args.kernel_num
        num_classes = self.args.class_num
        dropout = args.dropout

        # Embedding layer: pretrained table or trained from scratch.
        if args.word_Embedding:
            self.vocab_size, self.embed_dim = self.args.pretrained_weight.shape
            self.embedding = nn.Embedding.from_pretrained(self.args.pretrained_weight,
                                                          freeze=freeze_embedding)
        else:
            self.embedding = nn.Embedding(num_embeddings=self.args.embed_num,
                                          embedding_dim=self.args.embed_dim,
                                          padding_idx=0,
                                          max_norm=5.0)

        # Conv network: one Conv1d per filter width, each sliding over the
        # time axis with the embedding dimension as input channels.
        self.conv1d_list = nn.ModuleList([
            nn.Conv1d(in_channels=self.embed_dim,
                      out_channels=num_filters,
                      kernel_size=size)
            for size in filter_sizes
        ])
        # Fully-connected classifier and dropout.
        self.fc = nn.Linear(num_filters * len(filter_sizes), num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_ids):
        """Perform a forward pass through the network.

        Args:
            input_ids (torch.Tensor): A tensor of token ids with shape
                (batch_size, max_sent_length)

        Returns:
            logits (torch.Tensor): Output logits with shape (batch_size,
                class_num)
        """
        # Get embeddings from `input_ids`. Output shape: (b, max_len, embed_dim)
        x_embed = self.embedding(input_ids).float()

        # Permute `x_embed` to match input shape requirement of `nn.Conv1d`.
        # Output shape: (b, embed_dim, max_len)
        x_reshaped = x_embed.permute(0, 2, 1)

        # Apply CNN and ReLU. Output shape: (b, kernel_num, L_out)
        x_conv_list = [F.relu(conv1d(x_reshaped))
                       for conv1d in self.conv1d_list]

        # Global max pooling over time. Output shape: (b, kernel_num, 1)
        x_pool_list = [F.max_pool1d(x_conv, kernel_size=x_conv.shape[2])
                       for x_conv in x_conv_list]

        # Concatenate pooled features for the fully connected layer.
        # Output shape: (b, kernel_num * len(kernel_sizes))
        x_fc = torch.cat([x_pool.squeeze(dim=2) for x_pool in x_pool_list],
                         dim=1)

        # Compute logits. Output shape: (b, class_num)
        logits = self.fc(self.dropout(x_fc))

        return logits


class TextCNN2d(nn.Module):
    """A 2D convolutional neural network for sentence classification.

    Treats the (seq_len, embed_dim) embedding matrix as a one-channel image,
    applies Conv2d filters spanning the full embedding dimension, max-pools
    each feature map over time, and classifies with either a single linear
    layer or (when batch normalization is enabled) a two-layer head.
    """

    def __init__(self, args):
        """Build the model from the ``args`` configuration namespace.

        Reads: embed_num, embed_dim, class_num, kernel_num, kernel_sizes,
        word_Embedding, pretrained_weight, wide_conv, init_weight,
        init_weight_value, dropout, dropout_embed, batch_normalizations,
        bath_norm_momentum, batch_norm_affine.
        """
        super(TextCNN2d, self).__init__()
        self.args = args

        V = args.embed_num      # vocabulary size
        D = args.embed_dim      # embedding dimension
        C = args.class_num      # number of classes
        Ci = 1                  # input channels (embedding treated as image)
        Co = args.kernel_num    # feature maps per kernel size
        Ks = args.kernel_sizes  # kernel heights (words covered per filter)

        # Embedding table: pretrained or trained from scratch.
        if args.word_Embedding:
            self.vocab_size, self.embed_dim = args.pretrained_weight.shape
            self.embeddings = nn.Embedding.from_pretrained(
                args.pretrained_weight)
        else:
            self.embed_dim = args.embed_dim
            self.embeddings = nn.Embedding(num_embeddings=V,
                                           embedding_dim=self.embed_dim,
                                           padding_idx=0,
                                           max_norm=5.0)

        if args.wide_conv is True:
            print("using wide convolution")
            # padding=K//2 keeps the output length close to the input length
            # (exactly equal for odd K at stride 1).
            self.convs1 = nn.ModuleList([nn.Conv2d(in_channels=Ci, out_channels=Co, kernel_size=(K, D), stride=(1, 1),
                                                   padding=(K//2, 0), dilation=1, bias=False) for K in Ks])
        else:
            print("using narrow convolution")
            self.convs1 = nn.ModuleList([nn.Conv2d(
                in_channels=Ci, out_channels=Co, kernel_size=(K, D), bias=True) for K in Ks])
        print(self.convs1)

        if args.init_weight:
            print("Initing W .......")
            for conv in self.convs1:
                # In-place Xavier init (`xavier_normal_`; the no-underscore
                # variant is deprecated).
                init.xavier_normal_(
                    conv.weight.data, gain=np.sqrt(args.init_weight_value))
                fan_in, fan_out = TextCNN2d.calculate_fan_in_and_fan_out(
                    conv.weight.data)
                print(" in {} out {} ".format(fan_in, fan_out))

        self.dropout = nn.Dropout(args.dropout)
        self.dropout_embed = nn.Dropout(args.dropout_embed)
        in_fea = len(Ks) * Co

        # Two-layer head, used only on the batch-normalized path in forward().
        self.fc1 = nn.Linear(in_features=in_fea,
                             out_features=in_fea//2, bias=True)
        self.fc2 = nn.Linear(in_features=in_fea//2, out_features=C, bias=True)

        # Single-layer head, used when batch normalization is disabled.
        self.fc = nn.Linear(in_features=in_fea, out_features=C, bias=True)
        # Whether to use batch normalization.
        # NOTE(review): "bath_norm_momentum" looks like a typo for
        # "batch_norm_momentum", but it is the attribute name the config
        # actually provides -- confirm before renaming.
        if args.batch_normalizations is True:
            print("using batch_normalizations in the model......")
            self.convs1_bn = nn.BatchNorm2d(num_features=Co, momentum=args.bath_norm_momentum,
                                            affine=args.batch_norm_affine)
            self.fc1_bn = nn.BatchNorm1d(num_features=in_fea//2, momentum=args.bath_norm_momentum,
                                         affine=args.batch_norm_affine)
            self.fc2_bn = nn.BatchNorm1d(num_features=C, momentum=args.bath_norm_momentum,
                                         affine=args.batch_norm_affine)

    @staticmethod
    def calculate_fan_in_and_fan_out(tensor):
        """Return (fan_in, fan_out) for a weight tensor.

        For a 2D (linear) weight this is (in_features, out_features); for
        conv weights the receptive-field size multiplies both fans.

        Raises:
            ValueError: If the tensor has fewer than 2 dimensions.
        """
        dimensions = tensor.ndimension()
        if dimensions < 2:
            raise ValueError(
                "Fan in and fan out can not be computed for tensor with less than 2 dimensions")

        if dimensions == 2:  # Linear
            fan_in = tensor.size(1)
            fan_out = tensor.size(0)
        else:
            num_input_fmaps = tensor.size(1)
            num_output_fmaps = tensor.size(0)
            receptive_field_size = 1
            if tensor.dim() > 2:
                # Number of elements in one kernel (e.g. kH * kW for Conv2d).
                receptive_field_size = tensor[0][0].numel()
            fan_in = num_input_fmaps * receptive_field_size
            fan_out = num_output_fmaps * receptive_field_size

        return fan_in, fan_out

    def forward(self, x):
        """Classify token-id batch ``x`` of shape (N, W) into (N, C) logits."""
        x = self.embeddings(x).float()  # (N,W,D)
        x = self.dropout_embed(x)
        x = x.unsqueeze(1)  # (N,Ci,W,D)
        if self.args.batch_normalizations is True:
            x = [self.convs1_bn(F.relu(conv(x))).squeeze(3)
                 for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2)
                 for i in x]  # [(N,Co), ...]*len(Ks)
        else:
            x = [F.relu(conv(x)).squeeze(3)
                 for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
            x = [F.max_pool1d(i, i.size(2)).squeeze(2)
                 for i in x]  # [(N,Co), ...]*len(Ks)
        x = torch.cat(x, 1)
        x = self.dropout(x)  # (N,len(Ks)*Co)

        if self.args.batch_normalizations is True:
            x = self.fc1_bn(self.fc1(x))
            logit = self.fc2_bn(self.fc2(F.relu(x)))
        else:
            logit = self.fc(x)
        return logit


class DeepCNN(nn.Module):
    """A two-layer convolutional network for sentence classification.

    The first Conv2d layer maps the embedded sentence back to a
    (seq_len, embed_dim)-shaped feature map, which is fed as a one-channel
    "image" to a second Conv2d layer, followed by global max pooling over
    time and a two-layer linear classifier.
    """

    def __init__(self, args):
        """Build the model from the ``args`` configuration namespace.

        Reads: embed_num, embed_dim, class_num, kernel_num, kernel_sizes,
        max_norm, word_Embedding, pretrained_weight, init_weight,
        init_weight_value, dropout.
        """
        super(DeepCNN, self).__init__()
        self.args = args

        V = args.embed_num      # vocabulary size
        D = args.embed_dim      # embedding dimension
        C = args.class_num      # number of classes
        Ci = 1                  # input channels
        Co = args.kernel_num    # feature maps per kernel size
        Ks = args.kernel_sizes  # kernel heights

        # NOTE(review): the original code first built an embedding from
        # args.max_norm / args.paddingId and then unconditionally overwrote
        # it just below; that dead construction has been removed. The
        # informational print is kept.
        print("max_norm = {} ".format(args.max_norm))

        # Embedding table: pretrained or trained from scratch.
        if args.word_Embedding:
            self.vocab_size, self.embed_dim = args.pretrained_weight.shape
            self.embed = nn.Embedding.from_pretrained(
                args.pretrained_weight)
        else:
            self.embed_dim = args.embed_dim
            self.embed = nn.Embedding(num_embeddings=V,
                                      embedding_dim=self.embed_dim,
                                      padding_idx=0,
                                      max_norm=5.0)

        # Conv layers. convs1 outputs D channels so that, after a transpose,
        # its result has the same (W, D) layout as the embedding and can be
        # consumed by convs2. padding=K//2 roughly preserves sequence length.
        self.convs1 = nn.ModuleList([nn.Conv2d(Ci, D, (K, D), stride=1, padding=(
            K//2, 0), bias=True) for K in Ks])
        self.convs2 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D), stride=1, padding=(
            K//2, 0), bias=True) for K in Ks])
        print(self.convs1)
        print(self.convs2)

        if args.init_weight:
            print("Initing W .......")
            for (conv1, conv2) in zip(self.convs1, self.convs2):
                # In-place init (`xavier_normal_` / `uniform_`; the
                # no-underscore variants are deprecated). uniform_(b, 0, 0)
                # zero-fills the biases.
                init.xavier_normal_(conv1.weight.data,
                                    gain=np.sqrt(args.init_weight_value))
                init.uniform_(conv1.bias, 0, 0)
                init.xavier_normal_(conv2.weight.data,
                                    gain=np.sqrt(args.init_weight_value))
                init.uniform_(conv2.bias, 0, 0)

        # Dropout before the classifier.
        self.dropout = nn.Dropout(args.dropout)
        # Two-layer linear classifier.
        in_fea = len(Ks) * Co
        self.fc1 = nn.Linear(in_features=in_fea,
                             out_features=in_fea // 2, bias=True)
        self.fc2 = nn.Linear(in_features=in_fea // 2,
                             out_features=C, bias=True)

    def forward(self, x):
        """Classify token-id batch ``x`` of shape (N, W) into (N, C) logits."""
        # (N,W,D)
        embedded = self.embed(x).float()
        # (N,Ci,W,D)
        embedded = embedded.unsqueeze(1)
        # First conv layer; after the transpose each element is (N, W', D),
        # i.e. the same layout as the embedded input.
        layer_one = [torch.transpose(F.relu(conv(embedded)).squeeze(
            3), 1, 2) for conv in self.convs1]
        # Second conv layer: each element is (N, Co, W'').
        layer_two = [F.relu(conv(feat.unsqueeze(1))).squeeze(3)
                     for (conv, feat) in zip(self.convs2, layer_one)]
        # Global max pooling over time: each element becomes (N, Co).
        pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in layer_two]
        output = torch.cat(pooled, 1)  # (N, len(Ks)*Co)
        # Dropout, then the two-layer classifier.
        output = self.dropout(output)
        output = self.fc1(F.relu(output))
        logit = self.fc2(F.relu(output))
        return logit
