import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import random
from common.configs.tools import seed_num
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
torch.manual_seed(seed_num)
random.seed(seed_num)


# class BiLSTM(nn.Module):

#     def __init__(self, args):
#         super(BiLSTM, self).__init__()
#         self.hidden_size = 64
#         drp = 0.1
#         self.embedding = nn.Embedding(max_features, embed_size)
#         self.embedding.weight = nn.Parameter(
#             torch.tensor(embedding_matrix, dtype=torch.float32))
#         self.embedding.weight.requires_grad = False
#         self.lstm = nn.LSTM(embed_size, self.hidden_size,
#                             bidirectional=True, batch_first=True)
#         self.linear = nn.Linear(self.hidden_size*4, 64)
#         self.relu = nn.ReLU()
#         self.dropout = nn.Dropout(drp)
#         self.out = nn.Linear(64, 1)

#     def forward(self, x):
#         h_embedding = self.embedding(x)
#         h_embedding = torch.squeeze(torch.unsqueeze(h_embedding, 0))

#         h_lstm, _ = self.lstm(h_embedding)
#         avg_pool = torch.mean(h_lstm, 1)
#         max_pool, _ = torch.max(h_lstm, 1)
#         #print("avg_pool", avg_pool.size())
#         #print("max_pool", max_pool.size())
#         conc = torch.cat((avg_pool, max_pool), 1)
#         conc = self.relu(self.linear(conc))
#         conc = self.dropout(conc)
#         out = self.out(conc)
#         return out


class BiLSTM(torch.nn.Module):
    """Multi-layer bidirectional LSTM text classifier.

    Embeds token ids, runs a (possibly multi-layer) bidirectional LSTM,
    and classifies from the concatenated final forward/backward hidden
    states of the top layer.
    """

    def __init__(self, args):
        """Build the model from an args namespace.

        Args:
            args: namespace providing embed_num, embed_dim,
                lstm_hidden_dim, lstm_num_layers, dropout, class_num,
                word_Embedding and (if word_Embedding) pretrained_weight.
        """
        super(BiLSTM, self).__init__()
        self.args = args
        vocab_size = args.embed_num
        hidden_dim = args.lstm_hidden_dim
        lstm_num_layers = args.lstm_num_layers
        if args.word_Embedding:
            # Bug fix: take the embedding dim from the pretrained matrix
            # itself — it may differ from args.embed_dim, and the LSTM
            # input size below must match it.
            self.vocab_size, self.embed_dim = args.pretrained_weight.shape
            self.embedding = nn.Embedding.from_pretrained(
                args.pretrained_weight)
        else:
            self.embed_dim = args.embed_dim
            self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                          embedding_dim=self.embed_dim,
                                          padding_idx=0,
                                          max_norm=5.0)
        # Bug fix: args.lstm_num_layers was read but never passed, so the
        # LSTM always had one layer and the dropout= argument was a no-op
        # (PyTorch warns: inter-layer dropout needs num_layers > 1).
        self.lstm = nn.LSTM(self.embed_dim, hidden_dim,
                            num_layers=lstm_num_layers,
                            batch_first=True,
                            dropout=args.dropout if lstm_num_layers > 1 else 0.0,
                            bidirectional=True)
        # Bug fix: with a bidirectional LSTM the classifier input is the
        # concatenation of the top layer's forward and backward final
        # states (2 * hidden_dim). The original fed only ht[-1] (the
        # backward direction) and silently discarded the forward state.
        self.linear1 = nn.Linear(hidden_dim * 2, hidden_dim * 2)
        self.linear2 = nn.Linear(hidden_dim * 2, args.class_num)

        self.dropout = nn.Dropout(args.dropout)

    def forward(self, x):
        """Classify a batch of token-id sequences.

        Args:
            x: LongTensor of shape (batch, seq_len).

        Returns:
            Logits of shape (batch, class_num).
        """
        x = self.embedding(x).float()
        x = self.dropout(x)
        # ht: (num_layers * 2, batch, hidden_dim); ht[-2]/ht[-1] are the
        # top layer's final forward/backward states.
        lstm_out, (ht, ct) = self.lstm(x)
        final = torch.cat((ht[-2], ht[-1]), dim=1)
        logits = self.linear2(F.relu(self.linear1(final)))
        return logits


class BiLSTMAttn(torch.nn.Module):
    """LSTM classifier with dot-product attention over hidden states.

    Arguments
    ---------
    batch_size : Size of the batch which is same as the batch_size of the data
        returned by the TorchText BucketIterator (kept for compatibility; the
        actual batch size is now read from the input).
    output_size : number of target classes (args.class_num)
    hidden_size : Size of the hidden_state of the LSTM
    vocab_size : Size of the vocabulary containing unique words
    embedding_length : Embedding dimension of the word embeddings
    """

    def __init__(self, args):
        super(BiLSTMAttn, self).__init__()

        self.batch_size = args.batch_size
        self.output_size = args.class_num
        self.hidden_size = args.lstm_hidden_dim
        self.vocab_size = args.embed_num
        self.embedding_length = args.embed_dim

        if args.word_Embedding:
            # Bug fix: take the embedding dim from the pretrained matrix —
            # it may differ from args.embed_dim, and the LSTM input size
            # below must match it.
            self.vocab_size, self.embed_dim = args.pretrained_weight.shape
            self.embedding_length = self.embed_dim
            self.embeddings = nn.Embedding.from_pretrained(
                args.pretrained_weight)
        else:
            self.embed_dim = args.embed_dim
            self.embeddings = nn.Embedding(num_embeddings=self.vocab_size,
                                           embedding_dim=self.embed_dim,
                                           padding_idx=0,
                                           max_norm=5.0)

        # NOTE(review): despite the class name, this LSTM is unidirectional
        # (bidirectional=False). Making it bidirectional would change every
        # downstream dimension, so it is only flagged here — confirm intent.
        self.lstm = nn.LSTM(self.embedding_length,
                            self.hidden_size, bidirectional=False)
        self.label = nn.Linear(self.hidden_size, self.output_size)

    def attention_net(self, lstm_output, final_state):
        """Compute an attention-weighted summary of the LSTM outputs.

        Soft alignment scores are computed between each hidden state and the
        last hidden state of the LSTM, using torch.bmm for the batched
        matrix multiplications.

        Arguments
        ---------
        lstm_output : LSTM outputs for every timestep,
            shape (batch_size, num_seq, hidden_size).
        final_state : final time-step hidden state (h_n) of the LSTM,
            shape (1, batch_size, hidden_size).

        Returns
        -------
        new_hidden_state : attention-weighted combination of the timestep
            outputs, shape (batch_size, hidden_size).
        """
        # (batch, hidden)
        hidden = final_state.squeeze(0)
        # Scores: dot product of every timestep output with the final state.
        attn_weights = torch.bmm(lstm_output, hidden.unsqueeze(2)).squeeze(2)
        soft_attn_weights = F.softmax(attn_weights, 1)
        # Weighted sum of timestep outputs.
        new_hidden_state = torch.bmm(lstm_output.transpose(
            1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)

        return new_hidden_state

    def forward(self, input_sentences, batch_size=None):
        """Run the model on a batch of token-id sequences.

        Parameters
        ----------
        input_sentences : LongTensor of shape (batch_size, num_sequences).
        batch_size : kept for interface compatibility; when None the real
            batch size is taken from the input.

        Returns
        -------
        Logits of shape (batch_size, output_size), computed from the
        attention-weighted hidden state.
        """
        input = self.embeddings(input_sentences).float()
        # nn.LSTM default is seq-first: (num_seq, batch, embed).
        input = input.permute(1, 0, 2)

        # Bug fixes: the initial states were hard-coded to .cuda() (crashing
        # on CPU-only machines) and wrapped in the deprecated Variable; the
        # batch_size=None branch used self.batch_size, which breaks on any
        # partial batch. Allocate on the input's device with the real batch.
        effective_batch = input.size(1) if batch_size is None else batch_size
        device = input.device
        h_0 = torch.zeros(1, effective_batch, self.hidden_size, device=device)
        c_0 = torch.zeros(1, effective_batch, self.hidden_size, device=device)

        # final_hidden_state: (1, batch_size, hidden_size)
        output, (final_hidden_state, final_cell_state) = self.lstm(
            input, (h_0, c_0))
        # back to (batch_size, num_seq, hidden_size) for attention
        output = output.permute(1, 0, 2)

        attn_output = self.attention_net(output, final_hidden_state)
        logits = self.label(attn_output)

        return logits


class RNN_(nn.Module):
    """Bidirectional GRU classifier over padded, variable-length sequences."""

    def __init__(self, args, use_last=True, embedding_tensor=None,
                 padding_index=0, hidden_size=64, num_layers=1, batch_first=True):
        """
        Args:
            args: namespace supplying embed_num, embed_dim, class_num,
                paddingId, lstm_hidden_dim, lstm_num_layers, dropout.
                (These override the padding_index/hidden_size/num_layers
                keyword defaults, which are kept for interface compatibility.)
            use_last: if True, classify from each sequence's last valid
                timestep; otherwise from the mean over timesteps.
            embedding_tensor: optional pretrained embedding matrix; when
                given it is loaded and frozen.
            padding_index: unused, superseded by args.paddingId.
            hidden_size: unused, superseded by args.lstm_hidden_dim.
            num_layers: unused, superseded by args.lstm_num_layers.
            batch_first: unused; the module is always batch-first.
        """
        super(RNN_, self).__init__()
        self.use_last = use_last
        vocab_size = args.embed_num
        embed_size = args.embed_dim
        num_output = args.class_num
        padding_index = args.paddingId
        hidden_size = args.lstm_hidden_dim
        num_layers = args.lstm_num_layers
        dropout = args.dropout

        # embedding (frozen when a pretrained tensor is supplied)
        if torch.is_tensor(embedding_tensor):
            self.encoder = nn.Embedding(
                vocab_size, embed_size, padding_idx=padding_index,
                _weight=embedding_tensor)
            self.encoder.weight.requires_grad = False
        else:
            self.encoder = nn.Embedding(
                vocab_size, embed_size, padding_idx=padding_index)

        self.drop_en = nn.Dropout(p=dropout)

        # Bug fix: inter-layer dropout is only meaningful for
        # num_layers > 1; passing 0.5 to a 1-layer GRU is a warning/no-op.
        self.rnn = nn.GRU(input_size=embed_size, hidden_size=hidden_size,
                          num_layers=num_layers,
                          dropout=0.5 if num_layers > 1 else 0.0,
                          batch_first=True, bidirectional=True)

        self.bn2 = nn.BatchNorm1d(hidden_size * 2)
        self.fc = nn.Linear(hidden_size * 2, num_output)

    def forward(self, x, seq_lengths=16):
        """
        Args:
            x: LongTensor of token ids, shape (batch, time_step).
            seq_lengths: true (unpadded) lengths — either a sequence with
                one entry per batch element, or a single int applied to
                every element (backward compatible with the old default).
        Returns:
            Logits of shape (batch, num_output).
        """
        # Bug fix: torch.ShortTensor(16) allocated an UNINITIALIZED
        # 16-element tensor of garbage values rather than a tensor of
        # lengths. Normalize to int64 on CPU (pack_padded_sequence
        # requires CPU int64 lengths).
        if isinstance(seq_lengths, int):
            seq_lengths = torch.full((x.size(0),), seq_lengths,
                                     dtype=torch.int64)
        else:
            seq_lengths = torch.as_tensor(
                seq_lengths, dtype=torch.int64).cpu()

        x_embed = self.drop_en(self.encoder(x))

        # Bug fixes: (1) the GRU was built with batch_first=True but the
        # input was permuted to (seq, batch) and packed/padded with the
        # default batch_first=False, so the (batch, time) indexing below
        # read the wrong axes; keep everything batch-first instead.
        # (2) pad_packed_sequence has no enforce_sorted parameter — the
        # original call raised TypeError.
        packed_input = pack_padded_sequence(
            x_embed, seq_lengths, batch_first=True, enforce_sorted=False)
        # None -> zero initial hidden state.
        packed_output, ht = self.rnn(packed_input, None)
        # out_rnn: (batch, max_len, 2 * hidden_size)
        out_rnn, _ = pad_packed_sequence(packed_output, batch_first=True)

        row_indices = torch.arange(0, x.size(0)).long()
        col_indices = seq_lengths - 1
        if next(self.parameters()).is_cuda:
            row_indices = row_indices.cuda()
            col_indices = col_indices.cuda()

        if self.use_last:
            # Output at each sequence's last valid (non-padding) timestep.
            last_tensor = out_rnn[row_indices, col_indices, :]
        else:
            # NOTE(review): this mean still includes padding positions
            # (zeros) for short sequences — confirm that is intended.
            last_tensor = torch.mean(out_rnn[row_indices, :, :], dim=1)

        fc_input = self.bn2(last_tensor)
        out = self.fc(fc_input)
        return out
