import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import random
from DataUtils.Common import seed_num
torch.manual_seed(seed_num)
random.seed(seed_num)
from self_attention import SelfAttentionDot
"""
Neural Networks model : Bidirection GRU
"""


class BiGRU(nn.Module):
    """Bidirectional-GRU sentence classifier.

    Pipeline: token ids -> embedding -> BiGRU -> max-pool over time
    -> tanh -> linear projection to class logits.

    Args:
        word_length: vocabulary size (number of embedding rows).
        embed_dim: embedding dimension (default 256, as before).
        hidden_dim: GRU hidden size per direction (default 32, as before).
        num_classes: number of output classes (default 3, as before).
        seq_len: sequence length assumed when a flattened 1-D token
            stream is passed to ``forward`` (default 20, as before).
    """

    def __init__(self, word_length, embed_dim=256, hidden_dim=32,
                 num_classes=3, seq_len=20):
        super(BiGRU, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = 1
        self.embed_dim = embed_dim
        self.seq_len = seq_len
        self.embed = nn.Embedding(word_length, embed_dim)
        # Bidirectional GRU; batch_first=True, so it expects (batch, seq, embed_dim).
        self.bigru = nn.GRU(embed_dim, hidden_dim, num_layers=self.num_layers,
                            bidirectional=True, batch_first=True)
        # Both directions are concatenated, hence the 2x input width.
        self.hidden2label = nn.Linear(hidden_dim * 2, num_classes)
        # NOTE(review): built but currently unused in forward (call is
        # commented out); kept so checkpoints / state dicts stay compatible.
        self.self_attention = SelfAttentionDot(hidden_dim * 2, hidden_dim * 2)

    def forward(self, input):
        """Compute class logits.

        Args:
            input: LongTensor of token ids, either (batch, seq) or a
                flattened 1-D stream whose length is a multiple of
                ``self.seq_len``.

        Returns:
            Tensor of shape (batch, num_classes) with unnormalized logits.
        """
        embed = self.embed(input)
        if embed.dim() == 2:
            # Flattened 1-D input: restore (batch, seq_len, embed_dim).
            # (For 2-D input the old view(-1, 20, 256) was a no-op, so
            # skipping it generalizes to variable sequence lengths.)
            embed = embed.view(-1, self.seq_len, self.embed_dim)
        gru_out, _ = self.bigru(embed)

        # Max-pool over the time dimension: (batch, seq, 2H) -> (batch, 2H).
        gru_out = gru_out.transpose(1, 2)
        gru_out = F.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
        # torch.tanh replaces the deprecated F.tanh.
        gru_out = torch.tanh(gru_out)
        # Linear projection to class logits.
        logit = self.hidden2label(gru_out)
        return logit