#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author: juzipi
@file: layers.py
@time:2022/04/18
@description:
"""
from torch import nn
from .utils import sort_by_seq_lens, masked_softmax, weighted_sum


class RNNEncoder(nn.Module):
    """RNN-based sequence encoder for variable-length, padded batches.

    Wraps any ``torch.nn.RNNBase`` subclass (e.g. ``nn.LSTM``) and takes care
    of the pack/unpack bookkeeping: sequences are sorted by length, packed,
    encoded, unpacked, and finally restored to the original batch order.
    """

    def __init__(self,
                 rnn_type,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 bias=True,
                 dropout=0.0,
                 bidirectional=False):
        """
        Args:
            rnn_type: The type of RNN to use as encoder in the module. Must be a class inheriting from torch.nn.RNNBase
                (such as torch.nn.LSTM for example).
            input_size: The number of expected features in the input of the module.
            hidden_size: The number of features in the hidden state of the RNN used as encoder by the module.
            num_layers: The number of recurrent layers in the encoder of the module. Defaults to 1.
            bias: If False, the encoder does not use bias weights b_ih and b_hh. Defaults to True.
            dropout: If non-zero, introduces a dropout layer on the outputs of each layer of the encoder except the last
                     one, with dropout probability equal to 'dropout'. Defaults to 0.0.
            bidirectional: If True, the encoder of the module is bidirectional. Defaults to False.

        Raises:
            TypeError: If ``rnn_type`` is not a class inheriting from torch.nn.RNNBase.
        """
        # Raise explicitly instead of using `assert`: asserts are stripped
        # when Python runs with -O, silently disabling the validation.
        if not (isinstance(rnn_type, type) and issubclass(rnn_type, nn.RNNBase)):
            raise TypeError("rnn_type must be a class inheriting from torch.nn.RNNBase")
        super(RNNEncoder, self).__init__()
        self.rnn_type = rnn_type
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.dropout = dropout
        self.bidirectional = bidirectional
        # batch_first=True: inputs/outputs are (batch, sequence, features).
        self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers,
                                 bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional)

    def forward(self, sequences_batch, sequences_lengths):
        """
        Args:
            sequences_batch: A batch of variable length sequences of vectors. The batch is assumed to be of size
                             (batch, sequence, vector_dim).
            sequences_lengths: A 1D tensor containing the sizes of the sequences in the input batch.
        Returns:
            reordered_outputs: The outputs (hidden states) of the encoder for the sequences in the input batch,
                               in the same order and with the same padded sequence length as the input.
        """
        # pack_padded_sequence requires length-sorted batches (enforce_sorted
        # defaults to True); sort here and remember how to undo it.
        sorted_batch, sorted_lengths, _, restoration_idx = sort_by_seq_lens(sequences_batch, sequences_lengths)
        # Lengths must live on CPU for pack_padded_sequence.
        packed_batch = nn.utils.rnn.pack_padded_sequence(sorted_batch, sorted_lengths.cpu(), batch_first=True)
        # Run the recurrent encoder over the packed sequences.
        outputs, _ = self._encoder(packed_batch)
        # total_length keeps the padded time dimension identical to the
        # input's, even when the longest sequence is shorter than the padding.
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True,
                                                      total_length=sequences_batch.size(1))
        # Restore the original (pre-sort) batch order.
        restoration_idx = restoration_idx.to(sequences_batch.device)
        reordered_outputs = outputs.index_select(0, restoration_idx)
        return reordered_outputs


class SoftmaxAttention(nn.Module):
    """Soft alignment attention between encoded premises and hypotheses.

    Computes the dot products between every pair of premise/hypothesis
    positions, normalises them with a masked softmax in both directions, and
    returns, for each side, the attention-weighted sum of the other side's
    vectors (the ESIM-style soft alignment).
    """

    def forward(self, premise_batch, premise_mask, hypothesis_batch, hypothesis_mask):
        """
        Args:
            premise_batch: A batch of sequences of vectors representing the premises in some NLI task. The batch is
                           assumed to have the size (batch, sequences, vector_dim).
            premise_mask: A mask for the sequences in the premise batch, to ignore padding data in the sequences during
                          the computation of the attention.
            hypothesis_batch: A batch of sequences of vectors representing the hypotheses in some NLI task. The batch
                              is assumed to have the size (batch, sequences, vector_dim).
            hypothesis_mask: A mask for the sequences in the hypotheses batch, to ignore padding data in the sequences
                             during the computation of the attention.
        Returns:
            attended_premises: The sequences of attention vectors for the premises in the input batch.
            attended_hypotheses: The sequences of attention vectors for the hypotheses in the input batch.
        """
        # Pairwise dot-product scores: (batch, premise_len, hypothesis_len),
        # plus the transposed view for the reverse direction.
        scores = premise_batch.bmm(hypothesis_batch.transpose(2, 1).contiguous())
        scores_transposed = scores.transpose(1, 2).contiguous()

        # Normalise along the attended side, ignoring its padded positions.
        premise_to_hyp_weights = masked_softmax(scores, hypothesis_mask)
        hyp_to_premise_weights = masked_softmax(scores_transposed, premise_mask)

        # Each premise position attends over the hypothesis, and vice versa;
        # the output rows for padded positions are zeroed by the given mask.
        attended_premises = weighted_sum(hypothesis_batch, premise_to_hyp_weights, premise_mask)
        attended_hypotheses = weighted_sum(premise_batch, hyp_to_premise_weights, hypothesis_mask)
        return attended_premises, attended_hypotheses
