# -*- coding: utf-8 -*-
# file: dynamic_rnn.py
# author: songyouwei <youwei0314@gmail.com>
# Copyright (C) 2018. All Rights Reserved.


import torch
import torch.nn as nn
import numpy as np


class DynamicLSTM(nn.Module):
    """
    Recurrent wrapper (LSTM / GRU / RNN) that supports variable-length padded
    batches, similar to TensorFlow's ``dynamic_rnn(input, sequence_length)``.

    Pipeline: sort batch by length (descending) -> pack -> run RNN ->
    unpack -> restore original batch order.
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=True, dropout=0,
                 bidirectional=False, only_use_last_hidden_state=False, rnn_type='LSTM'):
        """
        :param input_size: The number of expected features in the input x
        :param hidden_size: The number of features in the hidden state h
        :param num_layers: Number of recurrent layers.
        :param bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True
        :param batch_first: If True, then the input and output tensors are provided as (batch, seq, feature)
        :param dropout: If non-zero, introduces a dropout layer on the outputs of each RNN layer except the last layer
        :param bidirectional: If True, becomes a bidirectional RNN. Default: False
        :param only_use_last_hidden_state: If True, ``forward`` returns only the final hidden state h_n
        :param rnn_type: one of {'LSTM', 'GRU', 'RNN'}
        :raises ValueError: if ``rnn_type`` is not one of the supported kinds
        """
        super(DynamicLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.only_use_last_hidden_state = only_use_last_hidden_state
        self.rnn_type = rnn_type

        # Dispatch table instead of an if/elif chain; fail fast on a typo'd
        # rnn_type instead of leaving self.RNN undefined until forward().
        rnn_classes = {'LSTM': nn.LSTM, 'GRU': nn.GRU, 'RNN': nn.RNN}
        if rnn_type not in rnn_classes:
            raise ValueError("rnn_type must be one of {'LSTM', 'GRU', 'RNN'}, got %r" % (rnn_type,))
        self.RNN = rnn_classes[rnn_type](
            input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
            bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)

    def forward(self, x, x_len):
        """
        sequence -> sort -> pack -> process using RNN -> unpack -> unsort

        :param x: padded sequence embeddings, (batch, seq, feature) if
            ``batch_first`` else (seq, batch, feature)
        :param x_len: per-sample true lengths; tensor, numpy array, or list
        :return: ``h_n`` if ``only_use_last_hidden_state`` else
            ``(output, (h_n, c_n))`` where ``c_n`` is ``None`` for non-LSTM types
        """
        # Normalize lengths: pack_padded_sequence requires a CPU int64 tensor,
        # and callers may pass a list / numpy array / CUDA tensor.
        x_len = torch.as_tensor(x_len, dtype=torch.long, device='cpu')
        # Batch dimension of x/out depends on batch_first; hidden states are
        # always (num_layers * num_directions, batch, hidden_size).
        batch_dim = 0 if self.batch_first else 1

        """sort by length, descending (required by pack_padded_sequence)"""
        x_sort_idx = torch.argsort(-x_len)
        # argsort of the sort permutation yields its inverse: position of each
        # sorted sample back in the original batch order.
        x_unsort_idx = torch.argsort(x_sort_idx)
        x_len = x_len[x_sort_idx]
        x = x.index_select(batch_dim, x_sort_idx.to(x.device))

        """pack: drop the padding so the RNN never sees it"""
        x_emb_p = torch.nn.utils.rnn.pack_padded_sequence(x, x_len, batch_first=self.batch_first)

        # Run the selected RNN; only LSTM returns a cell state.
        if self.rnn_type == 'LSTM':
            out_pack, (ht, ct) = self.RNN(x_emb_p, None)
        else:
            out_pack, ht = self.RNN(x_emb_p, None)
            ct = None

        """unsort: h (batch is always dim 1 of the hidden state)"""
        x_unsort_idx = x_unsort_idx.to(ht.device)
        ht = ht.index_select(1, x_unsort_idx)

        if self.only_use_last_hidden_state:
            return ht

        """unpack and unsort: out"""
        out, _ = torch.nn.utils.rnn.pad_packed_sequence(out_pack, batch_first=self.batch_first)
        out = out.index_select(batch_dim, x_unsort_idx)
        """unsort: c"""
        if self.rnn_type == 'LSTM':
            ct = ct.index_select(1, x_unsort_idx)

        return out, (ht, ct)
