import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
import torch.nn.init as init
from common.configs.tools import seed_num
torch.manual_seed(seed_num)
random.seed(seed_num)


class AttentionCNN(nn.Module):
    """CNN text classifier with self-attention over each conv output.

    Pipeline: embed -> 1-D convolutions (one per kernel size) -> self-attention
    over the temporal axis of each conv output -> max-pool over time -> concat
    -> dropout -> linear classifier.

    Required ``args`` attributes (read in ``__init__``):
        pretrained_weight: pre-trained embedding matrix, shape (vocab, dim);
            used only when ``word_Embedding`` is true.
        word_Embedding: if true, load ``pretrained_weight`` instead of training
            a fresh embedding table.
        embed_num / embed_dim: vocabulary size / embedding dimension for the
            freshly trained table.
        kernel_sizes / kernel_num: conv kernel widths and filters per width.
        class_num: number of output classes.
        dropout: dropout probability applied before the final linear layer.
    """

    def __init__(self, args):
        super(AttentionCNN, self).__init__()
        self.args = args

        pretrained_embedding = self.args.pretrained_weight
        freeze_embedding = False  # always fine-tune the pretrained vectors
        vocab_size = self.args.embed_num
        embed_dim = self.args.embed_dim
        filter_sizes = self.args.kernel_sizes
        num_filters = self.args.kernel_num
        num_classes = self.args.class_num
        dropout = self.args.dropout  # was `args.dropout`; use self.args consistently

        # Embedding layer: either load pre-trained vectors or train from scratch.
        if args.word_Embedding:
            # NOTE(review): assumes pretrained_weight is a torch tensor of
            # shape (vocab, dim) — from_pretrained rejects numpy arrays.
            self.vocab_size, self.embed_dim = pretrained_embedding.shape
            self.embedding = nn.Embedding.from_pretrained(pretrained_embedding,
                                                          freeze=freeze_embedding)
        else:
            # Keep self.vocab_size defined on both branches so the attribute
            # is always present on the instance.
            self.vocab_size = vocab_size
            self.embed_dim = embed_dim
            self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                          embedding_dim=self.embed_dim,
                                          padding_idx=0,
                                          max_norm=5.0)

        # Conv network: one Conv1d per kernel size, each emitting num_filters
        # channels over the temporal axis.
        self.conv1d_list = nn.ModuleList([
            nn.Conv1d(in_channels=self.embed_dim,
                      out_channels=num_filters,
                      kernel_size=k)
            for k in filter_sizes
        ])

        # A single attention module shared across all conv branches.
        self.attention = SelfAttention(num_filters)
        # Fully-connected classification head and dropout.
        self.fc = nn.Linear(len(filter_sizes) * num_filters, num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_ids):
        """Return class logits for a batch of token-id sequences.

        Args:
            input_ids: integer tensor of shape (batch, max_len).

        Returns:
            Logits tensor of shape (batch, class_num).
        """
        # Embeddings: (batch, max_len, embed_dim). .float() guards against a
        # non-float pretrained weight dtype.
        x_embed = self.embedding(input_ids).float()

        # Conv1d expects channels-first: (batch, embed_dim, max_len).
        x_reshaped = x_embed.permute(0, 2, 1)

        # Apply each conv + ReLU. Each output: (batch, num_filters, L_out_i).
        x_conv_list = [F.relu(conv1d(x_reshaped))
                       for conv1d in self.conv1d_list]

        # Attention operates on (batch, time, channels); permute in and out.
        conved_att = [(self.attention(conv.permute(0, 2, 1))
                       ).permute(0, 2, 1) for conv in x_conv_list]

        # Global max-pool over time: each entry becomes (batch, num_filters).
        pooled = [F.max_pool1d(x_conv, kernel_size=x_conv.shape[2]).squeeze(2)
                  for x_conv in conved_att]

        # Concatenate branches -> (batch, len(kernel_sizes) * num_filters),
        # then dropout and classify.
        logits = self.fc(self.dropout(torch.cat(pooled, dim=1)))

        return logits


class SelfAttention(nn.Module):
    """Additive self-attention that re-weights time steps.

    A small two-layer MLP assigns one scalar score per position; the scores
    are softmax-normalized over the sequence dimension and used to scale the
    inputs element-wise. Output shape equals input shape.
    """

    def __init__(self, filters):
        super().__init__()
        # Scorer MLP: filters -> 64 -> 1 (a single score per time step).
        scorer_layers = [
            nn.Linear(filters, 64),
            nn.ReLU(True),
            nn.Linear(64, 1),
        ]
        self.projection = nn.Sequential(*scorer_layers)

    def forward(self, encoder_outputs):
        """Re-weight ``encoder_outputs`` of shape (batch, sent_len, filters)."""
        # One scalar score per position: (batch, sent_len).
        scores = self.projection(encoder_outputs).squeeze(-1)
        # Normalize scores across the sequence dimension.
        attn = scores.softmax(dim=1)
        # Broadcast the weights back over the feature dimension.
        return encoder_outputs * attn.unsqueeze(-1)
