"""
---
title: Long Short-Term Memory (LSTM)
summary: A simple PyTorch implementation/tutorial of Long Short-Term Memory (LSTM) modules.
---

# Long Short-Term Memory (LSTM)

This is a [PyTorch](https://pytorch.org) implementation of Long Short-Term Memory.
"""

from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import Module


class HGRUCell(Module):
    r"""
    ## High-order Gate Recurrent Unit (HGRU) Cell

    The cell keeps a *stack* of memory vectors $V_{t-1}$ (one vector per stored
    block, maintained by the caller — see `MGRU`) and computes a hidden state
    $h_t$ from the input $x_t$ and that stack:

    \begin{align}
    z_t &= lin_x^z(x_t) + lin_V^z(V_{t-1}) \\
    r_t &= lin_x^r(x_t) + lin_V^r(V_{t-1}) \\
    \tilde{V}_t &= lin_x^{\tilde{V}}(x_t) + lin_V^{\tilde{V}}(V_{t-1} \odot \sigma(r_t)) \\
    V_t &= (1 - \sigma(z_t)) \odot V_{t-1} + \sigma(z_t) \odot \tanh(\tilde{V}_t) \\
    h_t &= \sum V_t
    \end{align}

    $\odot$ stands for element-wise multiplication and $\sigma$ is the sigmoid.

    Note that the updated memory $V_t$ is *not* returned: the cell only reads
    the stack to produce $h_t$, and the caller is responsible for growing /
    updating the stack itself.
    """

    def __init__(self, input_size: int, hidden_size: int, layer_norm: bool = False):
        """
        * `input_size` is the size of the input vector $x_t$
        * `hidden_size` is the size of each memory vector in $V$
        * `layer_norm` applies layer normalization to the gate pre-activations
          and the candidate memory (often improves results)
        """
        super().__init__()

        # Linear layers transforming the memory stack and the input.
        # Only one of each pair needs a bias since their outputs are summed.

        # Combines $lin_V^z$ and $lin_V^r$.
        self.hidden_lin = nn.Linear(hidden_size, 2 * hidden_size)
        # Combines $lin_x^z$ and $lin_x^r$.
        self.input_lin = nn.Linear(input_size, 2 * hidden_size, bias=False)

        # $lin_V^{\tilde{V}}$ and $lin_x^{\tilde{V}}$ for the candidate memory.
        self._V_hidden_lin = nn.Linear(hidden_size, hidden_size)
        self._V_input_lin = nn.Linear(input_size, hidden_size, bias=False)

        # Optional layer normalization on $z_t$, $r_t$ and $\tilde{V}_t$;
        # `nn.Identity` keeps the forward pass uniform when it is disabled.
        if layer_norm:
            self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(2)])
            self.layer_norm_V = nn.LayerNorm(hidden_size)
        else:
            self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(2)])
            self.layer_norm_V = nn.Identity()

    def forward(self, x: torch.Tensor, V: torch.Tensor) -> torch.Tensor:
        """
        * `x` has shape `[batch_size, input_size]`
        * `V` is the memory stack with shape `[n_blocks, batch_size, hidden_size]`

        Returns the hidden state `h` of shape `[batch_size, hidden_size]`.
        """
        # Gate pre-activations for $z_t$ and $r_t$ in a single pass;
        # the input projection broadcasts over the leading `n_blocks`
        # dimension of `V`.
        zr = self.hidden_lin(V) + self.input_lin(x)  # [n_blocks, batch, 2 * hidden]
        # Each layer produced 2x `hidden_size` features; split into the two gates
        zr = zr.chunk(2, dim=-1)  # 2 x [n_blocks, batch, hidden]

        # Apply (optional) layer normalization
        zr = [self.layer_norm[i](zr[i]) for i in range(2)]

        # $z_t$ (update gate), $r_t$ (reset gate)
        z, r = zr

        # Candidate memory $\tilde{V}_t$; the reset gate controls how much
        # of the old memory feeds into the candidate
        _V = self._V_hidden_lin(torch.sigmoid(r) * V) + self._V_input_lin(x)  # [n_blocks, batch, hidden]
        _V = self.layer_norm_V(_V)

        # $V_t = (1 - \sigma(z_t)) \odot V_{t-1} + \sigma(z_t) \odot \tanh(\tilde{V}_t)$
        V_next = (1 - torch.sigmoid(z)) * V + torch.sigmoid(z) * torch.tanh(_V)  # [n_blocks, batch, hidden]

        # $h_t = \sum V_t$ — sum over the memory-stack dimension
        h_next = torch.sum(V_next, dim=0)  # [batch, hidden]
        return h_next


class MGRU(Module):
    """
    ## Mask GRU

    A stack of `n_layers` `HGRUCell`s. Within each block of `block_size` time
    steps the hidden states of a layer are pooled (summed) into one memory
    vector, which is appended to that layer's memory stack $V$.
    """

    def __init__(self, input_size: int, hidden_size: int, n_layers: int, block_size: int):
        """
        * `input_size` is the size of each input feature vector
        * `hidden_size` is the size of the hidden state / memory vectors
        * `n_layers` is the number of stacked `HGRUCell`s
        * `block_size` is the number of time steps pooled into one memory vector
        """
        super().__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.block_size = block_size
        # Only the first layer consumes the raw input; every other layer
        # consumes the hidden state of the layer below.
        self.cells = nn.ModuleList([HGRUCell(input_size, hidden_size)] +
                                   [HGRUCell(hidden_size, hidden_size) for _ in range(n_layers - 1)])

    def forward(self, x: torch.Tensor, state: Optional[torch.Tensor] = None):
        """
        * `x` has shape `[batch_size, n_steps, input_size]` (batch-first)
        * `state`, if given, holds one seed memory vector per layer and is
          expected to have shape `[n_layers, batch_size, hidden_size]`.
          NOTE(review): the state returned by this method is transposed to
          `[batch_size, n_layers, hidden_size]`; transpose it back before
          feeding it in again — confirm the intended round-trip convention.

        Returns `(out, last)` where `out` is the final layer's memory stack
        with shape `[batch_size, n_blocks, hidden_size]` and `last` holds the
        last memory vector of every layer with shape
        `[batch_size, n_layers, hidden_size]`.
        """
        batch_size, n_steps = x.shape[:2]

        # Initialize each layer's memory stack with a zero vector plus one
        # seed vector (random if no state was supplied).
        if state is None:
            init = torch.rand((batch_size, self.hidden_size)).to(x.device)
            V = [torch.stack([x.new_zeros(batch_size, self.hidden_size), init], dim=0)
                 for _ in range(self.n_layers)]
        else:
            # Fix: the original code unbound the not-yet-defined name `V`
            # here, raising a `NameError` whenever a state was supplied.
            V = [torch.stack([x.new_zeros(batch_size, self.hidden_size), sta], dim=0)
                 for sta in torch.unbind(state)]

        # Hidden states collected within the current block, per layer
        block_h = [[] for _ in range(self.n_layers)]

        for t in range(n_steps):
            # Input to the first layer is the input itself
            inp = x[:, t, :]
            # Loop through the layers
            for layer in range(self.n_layers):
                # At every block boundary, pool the block's hidden states
                # (currently by summation) into one memory vector and append
                # it to this layer's stack.
                if t and t % self.block_size == 0:
                    pooled = torch.stack(block_h[layer], dim=0).sum(dim=0, keepdim=True)
                    V[layer] = torch.cat([V[layer], pooled], dim=0)
                    block_h[layer].clear()

                # Hidden state of this layer, computed from its memory stack
                h = self.cells[layer](inp, V[layer])
                block_h[layer].append(h)
                # Input to the next layer is this layer's hidden state
                inp = h

        # NOTE(review): hidden states after the last block boundary are
        # discarded (no final flush of `block_h`) — confirm this is intended.

        # Output is the final layer's full memory stack
        out = V[-1]  # [n_blocks, batch, hidden]
        # Keep only the last memory vector of each layer
        V = torch.stack(V)[:, -1, :, :]  # [n_layers, batch, hidden]

        return out.transpose(0, 1), V.transpose(0, 1)