import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
import torch.nn.init as init
from common.configs.tools import seed_num
# Seed both PyTorch and Python's RNG with the project-wide seed so that
# weight initialization, dropout, and any shuffling are reproducible.
torch.manual_seed(seed_num)
random.seed(seed_num)


class TestM(nn.Module):
    """CNN-based sentence classifier (Kim 2014 style).

    Embeds token ids, applies a bank of parallel 1-D convolutions with
    different kernel sizes, max-pools each feature map over time, and feeds
    the concatenated pooled features through dropout and a linear layer to
    produce class logits.

    Expected attributes on ``args``:
        embed_dim (int): embedding dimension (overridden by the pretrained
            matrix's second dim when ``word_Embedding`` is true).
        kernel_sizes (list[int]): one conv kernel width per branch.
        kernel_num (int): number of filters per branch.
        class_num (int): number of output classes.
        dropout (float): dropout probability before the final linear layer.
        word_Embedding (bool): if true, load ``args.pretrained_weight``.
        pretrained_weight (Tensor): (vocab_size, embed_dim) weight matrix,
            required only when ``word_Embedding`` is true.
        embed_num (int): vocabulary size, required only when
            ``word_Embedding`` is false.
    """

    def __init__(self, args):
        super(TestM, self).__init__()
        self.args = args

        # Pretrained embeddings are fine-tuned (not frozen) by default.
        freeze_embedding = False
        self.embed_dim = self.args.embed_dim
        filter_sizes = self.args.kernel_sizes
        num_filters = self.args.kernel_num
        num_classes = self.args.class_num
        dropout = args.dropout

        # Embedding layer
        if args.word_Embedding:
            # Derive vocab/embedding sizes from the pretrained matrix itself
            # so they cannot disagree with args.
            self.vocab_size, self.embed_dim = self.args.pretrained_weight.shape
            self.embedding = nn.Embedding.from_pretrained(self.args.pretrained_weight,
                                                          freeze=freeze_embedding)
        else:
            self.embedding = nn.Embedding(num_embeddings=self.args.embed_num,
                                          embedding_dim=self.args.embed_dim,
                                          padding_idx=0,
                                          max_norm=5.0)

        # Conv network: one Conv1d per kernel size, each producing
        # `num_filters` feature maps over the time axis.
        self.conv1d_list = nn.ModuleList([
            nn.Conv1d(in_channels=self.embed_dim,
                      out_channels=num_filters,
                      kernel_size=filter_sizes[i])
            for i in range(len(filter_sizes))
        ])
        # Fully-connected layer and Dropout
        self.fc = nn.Linear(num_filters * len(filter_sizes), num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_ids):
        """Perform a forward pass through the network.

        Args:
            input_ids (torch.Tensor): A tensor of token ids with shape
                (batch_size, max_sent_length)

        Returns:
            logits (torch.Tensor): Output logits with shape (batch_size,
                n_classes)
        """

        # Get embeddings from `input_ids`. Output shape: (b, max_len, embed_dim)
        # .float() guards against a pretrained matrix stored in another dtype.
        x_embed = self.embedding(input_ids).float()

        # Permute `x_embed` to match input shape requirement of `nn.Conv1d`.
        # Output shape: (b, embed_dim, max_len)
        x_reshaped = x_embed.permute(0, 2, 1)

        # Apply CNN and ReLU. Output shape: (b, num_filters[i], L_out)
        x_conv_list = [F.relu(conv1d(x_reshaped))
                       for conv1d in self.conv1d_list]

        # Max pooling over the full time axis. Output shape: (b, num_filters[i], 1)
        # NOTE: removed leftover debug prints, including `print(dd)` which
        # referenced an undefined name and crashed every forward pass.
        x_pool_list = [F.max_pool1d(x_conv, kernel_size=x_conv.shape[2])
                       for x_conv in x_conv_list]

        # Concatenate x_pool_list to feed the fully connected layer.
        # Output shape: (b, sum(num_filters))
        x_fc = torch.cat([x_pool.squeeze(dim=2) for x_pool in x_pool_list],
                         dim=1)

        # Compute logits. Output shape: (b, n_classes)
        logits = self.fc(self.dropout(x_fc))

        return logits
