from turtle import shape
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F


class Affine(nn.Module):
    """Gated residual transform: relu(alpha * x + beta) * scale + x.

    Parameters:
    output_dim:
        Size of the last (feature) dimension of the input.
    K:
        Expected sequence length; `alpha` has shape (K, output_dim) and
        broadcasts against inputs of shape (m, K, output_dim), so the
        input's middle dimension must equal K (or 1). Default 128.
    """

    def __init__(self, output_dim: int, K: int = 128):
        super().__init__()

        # Per-(position, channel) slope; tiny init keeps the branch near its bias.
        self.alpha = nn.Parameter(2e-4 * torch.ones(K, output_dim))
        # Per-channel shift and output gain.
        self.beta = nn.Parameter(2e-1 * torch.ones(output_dim))
        self.scale = nn.Parameter(2e-1 * torch.ones(output_dim))

    def forward(self, x):
        # Residual connection around the gated affine branch.
        branch = F.relu(self.alpha * x + self.beta) * self.scale
        return branch + x



class FullyConv(nn.Module):
    """Three-layer 1-D fully convolutional network.

    Takes and returns (batch, seq_len, channels); channels are moved to
    dim 1 internally for Conv1d. kernel_size=11 with padding=5 and
    stride=1 keeps the sequence length unchanged.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension.
    output_dim:
        Output dimension.
    dropout:
        Unused; accepted for signature compatibility with sibling models.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 dropout: float = 0.2,
                 **kwargs):
        super().__init__(**kwargs)

        kernel = 11
        pad = kernel // 2  # "same" padding for stride 1
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv3 = nn.Conv1d(hidden_dim, output_dim, kernel, stride=1, padding=pad)

        self.activation = nn.LeakyReLU(0.1)

        self.name = 'FullyConv'

    def forward(self, x):
        # (m, K, C) -> (m, C, K) for the Conv1d stack.
        out = x.transpose(1, 2)
        for conv in (self.conv1, self.conv2, self.conv3):
            out = self.activation(conv(out))
        # Back to (m, K, C).
        return out.transpose(1, 2)


class FullyConv4layer(nn.Module):
    """Four-layer 1-D fully convolutional network.

    Same contract as FullyConv but with one extra hidden conv layer.
    Takes and returns (batch, seq_len, channels); kernel_size=11 with
    padding=5 and stride=1 preserves the sequence length.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension.
    output_dim:
        Output dimension.
    dropout:
        Unused; accepted for signature compatibility with sibling models.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 dropout: float = 0.2,
                 **kwargs):
        super().__init__(**kwargs)

        kernel = 11
        pad = kernel // 2  # "same" padding for stride 1
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv4 = nn.Conv1d(hidden_dim, output_dim, kernel, stride=1, padding=pad)

        self.activation = nn.LeakyReLU(0.1)

        self.name = 'FullyConv4layer'

    def forward(self, x):
        # (m, K, C) -> (m, C, K) for the Conv1d stack.
        out = x.transpose(1, 2)
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            out = self.activation(conv(out))
        # Back to (m, K, C).
        return out.transpose(1, 2)


class FullyConvAffine(nn.Module):
    """Three-layer 1-D conv network with an Affine residual head and dropout.

    Same conv trunk as FullyConv; the (batch, seq_len, output_dim) result
    is then passed through Affine and Dropout.
    NOTE(review): Affine is built with its default K=128, so the sequence
    length must be 128 (or 1) for the broadcast — confirm with callers.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension.
    output_dim:
        Output dimension.
    dropout:
        Unused; the internal dropout is hard-coded to 0.2.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 dropout: float = 0.2,
                 **kwargs):
        super().__init__(**kwargs)

        kernel = 11
        pad = kernel // 2  # "same" padding for stride 1
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv3 = nn.Conv1d(hidden_dim, output_dim, kernel, stride=1, padding=pad)

        self.activation = nn.LeakyReLU(0.1)

        self._affine = Affine(output_dim)
        self._dropout = nn.Dropout(0.2)

        self.name = 'FullyConvAffine'

    def forward(self, x):
        # (m, K, C) -> (m, C, K) for the Conv1d stack.
        out = x.transpose(1, 2)
        for conv in (self.conv1, self.conv2, self.conv3):
            out = self.activation(conv(out))
        # Back to (m, K, C) before the elementwise head.
        out = out.transpose(1, 2)
        out = self._affine(out)
        return self._dropout(out)


class LSTM(nn.Module):
    """Benchmark LSTM.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension.
    output_dim:
        Output dimension.
    num_layers:
        Number of LSTM layers. Default is 3.
    dropout:
        Dropout value. Default is '0.2'.
    bidirectional:
        If 'True', becomes a bidirectional LSTM. Default: 'False'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int = 3,
                 dropout: float = 0.2,
                 bidirectional: bool = False,
                 **kwargs):
        super().__init__(**kwargs)

        self.rnn = nn.LSTM(input_dim, hidden_dim,
                           num_layers=num_layers, dropout=dropout,
                           batch_first=True, bidirectional=bidirectional)

        # A bidirectional RNN concatenates both directions on the feature axis.
        linear_in = hidden_dim * 2 if bidirectional else hidden_dim
        self.linear = nn.Linear(linear_in, output_dim)

        self.name = 'LSTM'

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Propagate input through the network.

        Parameters:
        x:
            Input tensor with shape (m, K, input_dim)

        Returns:
            Output tensor with shape (m, K, output_dim)
        """
        features, _ = self.rnn(x)
        return self.linear(features)



class LSTMaffine(nn.Module):
    """Benchmark LSTM with an Affine residual head on the projection output.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension.
    output_dim:
        Output dimension.
    num_layers:
        Number of LSTM layers. Default is 3.
    dropout:
        Dropout value. Default is '0.2'.
    bidirectional:
        If 'True', becomes a bidirectional LSTM. Default: 'False'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int = 3,
                 dropout: float = 0.2,
                 bidirectional: bool = False,
                 **kwargs):
        super().__init__(**kwargs)

        self.rnn = nn.LSTM(input_dim, hidden_dim,
                           num_layers=num_layers, dropout=dropout,
                           batch_first=True, bidirectional=bidirectional)

        # A bidirectional RNN concatenates both directions on the feature axis.
        linear_in = hidden_dim * 2 if bidirectional else hidden_dim
        self.linear = nn.Linear(linear_in, output_dim)
        # NOTE(review): Affine's default K=128 ties this head to sequence
        # length 128 (or 1) — confirm with callers.
        self._affine = Affine(output_dim)

        self.name = 'LSTMaffine'

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Propagate input through the network.

        Parameters:
        x:
            Input tensor with shape (m, K, input_dim)

        Returns:
            Output tensor with shape (m, K, output_dim)
        """
        features, _ = self.rnn(x)
        projected = self.linear(features)
        return self._affine(projected)



class BiGRU(LSTM):
    """Benchmark GRU (optionally bidirectional).

    Identical contract to LSTM; only the recurrent core is nn.GRU.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension.
    output_dim:
        Output dimension.
    num_layers:
        Number of GRU layers.
    dropout:
        Dropout value. Default is '0.2'.
    bidirectional:
        If 'True', becomes a bidirectional GRU. Default: 'False'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int,
                 dropout: float = 0.2,
                 bidirectional: bool = False,
                 **kwargs):
        # The parent builds the linear head (correctly sized for bidirectionality).
        super().__init__(input_dim, hidden_dim, output_dim, num_layers, dropout, bidirectional, **kwargs)

        # Swap the parent's LSTM core for a GRU with the same layout.
        self.rnn = nn.GRU(input_dim, hidden_dim,
                          num_layers=num_layers, dropout=dropout,
                          batch_first=True, bidirectional=bidirectional)

        self.name = 'GRU'


class BiGRUaffine(LSTMaffine):
    """Benchmark GRU (optionally bidirectional) with an Affine residual head.

    Identical contract to LSTMaffine; only the recurrent core is nn.GRU.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension.
    output_dim:
        Output dimension.
    num_layers:
        Number of GRU layers.
    dropout:
        Dropout value. Default is '0.2'.
    bidirectional:
        If 'True', becomes a bidirectional GRU. Default: 'False'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int,
                 dropout: float = 0.2,
                 bidirectional: bool = False,
                 **kwargs):
        # The parent builds the linear head and the Affine module.
        super().__init__(input_dim, hidden_dim, output_dim, num_layers, dropout, bidirectional, **kwargs)

        # Swap the parent's LSTM core for a GRU with the same layout.
        self.rnn = nn.GRU(input_dim, hidden_dim,
                          num_layers=num_layers, dropout=dropout,
                          batch_first=True, bidirectional=bidirectional)

        self.name = 'GRUaffine'



class cnnGru(nn.Module):
    """Three Conv1d feature layers followed by a GRU head (bidirectional by default).

    Takes and returns (batch, seq_len, channels); kernel_size=11 with
    padding=5 and stride=1 preserves the sequence length.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension (conv channels and GRU hidden size).
    output_dim:
        Output dimension.
    num_layers:
        Number of GRU layers.
    dropout:
        Dropout value passed to the GRU. Default is '0.2'.
    bidirectional:
        If 'True', the GRU head is bidirectional. Default: 'True'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int,
                 dropout: float = 0.2,
                 bidirectional: bool = True,
                 **kwargs):
        super().__init__(**kwargs)

        kernel = 11
        pad = kernel // 2  # "same" padding for stride 1
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)

        self.activation = nn.LeakyReLU(0.1)

        self.rnn = BiGRU(hidden_dim, hidden_dim, output_dim, num_layers, dropout=dropout, bidirectional=bidirectional)

        self.name = 'cnnGru'

    def forward(self, x):
        # (m, K, C) -> (m, C, K) for the Conv1d stack.
        out = x.transpose(1, 2)
        out = self.activation(self.conv1(out))
        out = self.activation(self.conv2(out))
        # The last conv output goes to the GRU without an activation.
        out = self.conv3(out)
        out = out.transpose(1, 2)
        return self.rnn(out)


class cnnGruaffine(nn.Module):
    """cnnGru variant with an Affine residual head on the GRU output.

    Takes and returns (batch, seq_len, channels); kernel_size=11 with
    padding=5 and stride=1 preserves the sequence length.
    NOTE(review): Affine's default K=128 ties this head to sequence
    length 128 (or 1) — confirm with callers.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension (conv channels and GRU hidden size).
    output_dim:
        Output dimension.
    num_layers:
        Number of GRU layers.
    dropout:
        Dropout value passed to the GRU. Default is '0.2'.
    bidirectional:
        If 'True', the GRU head is bidirectional. Default: 'True'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int,
                 dropout: float = 0.2,
                 bidirectional: bool = True,
                 **kwargs):
        super().__init__(**kwargs)

        kernel = 11
        pad = kernel // 2  # "same" padding for stride 1
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)

        self.activation = nn.LeakyReLU(0.1)

        self.rnn = BiGRU(hidden_dim, hidden_dim, output_dim, num_layers, dropout=dropout, bidirectional=bidirectional)

        self._affine = Affine(output_dim)

        self.name = 'cnnGruaffine'

    def forward(self, x):
        # (m, K, C) -> (m, C, K) for the Conv1d stack.
        out = x.transpose(1, 2)
        out = self.activation(self.conv1(out))
        out = self.activation(self.conv2(out))
        # The last conv output goes to the GRU without an activation.
        out = self.conv3(out)
        out = out.transpose(1, 2)

        out = self.rnn(out)
        return self._affine(out)


class cnnLSTM(nn.Module):
    """Three Conv1d feature layers followed by an LSTM head.

    Takes and returns (batch, seq_len, channels); kernel_size=11 with
    padding=5 and stride=1 preserves the sequence length.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension (conv channels and LSTM hidden size).
    output_dim:
        Output dimension.
    num_layers:
        Number of LSTM layers.
    dropout:
        Dropout value passed to the LSTM. Default is '0.2'.
    bidirectional:
        If 'True', the LSTM head is bidirectional. Default: 'False'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int,
                 dropout: float = 0.2,
                 bidirectional: bool = False,
                 **kwargs):
        super().__init__(**kwargs)

        kernel = 11
        pad = kernel // 2  # "same" padding for stride 1
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)

        self.activation = nn.LeakyReLU(0.1)

        self.rnn = LSTM(hidden_dim, hidden_dim, output_dim, num_layers, dropout=dropout, bidirectional=bidirectional)

        self.name = 'cnnLSTM'

    def forward(self, x):
        # (m, K, C) -> (m, C, K) for the Conv1d stack.
        out = x.transpose(1, 2)
        for conv in (self.conv1, self.conv2, self.conv3):
            out = self.activation(conv(out))
        # Back to (m, K, C) for the sequence model.
        out = out.transpose(1, 2)
        return self.rnn(out)


class cnnLSTMaffine(nn.Module):
    """cnnLSTM variant with an Affine residual head and dropout on the output.

    Takes and returns (batch, seq_len, channels); kernel_size=11 with
    padding=5 and stride=1 preserves the sequence length.
    NOTE(review): Affine's default K=128 ties this head to sequence
    length 128 (or 1) — confirm with callers.

    Parameters:
    input_dim:
        Input dimension.
    hidden_dim:
        Latent dimension (conv channels and LSTM hidden size).
    output_dim:
        Output dimension.
    num_layers:
        Number of LSTM layers.
    dropout:
        Dropout value passed to the LSTM; the output dropout is hard-coded to 0.2.
    bidirectional:
        If 'True', the LSTM head is bidirectional. Default: 'False'.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int,
                 dropout: float = 0.2,
                 bidirectional: bool = False,
                 **kwargs):
        super().__init__(**kwargs)

        kernel = 11
        pad = kernel // 2  # "same" padding for stride 1
        self.conv1 = nn.Conv1d(input_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)
        self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, kernel, stride=1, padding=pad)

        self.activation = nn.LeakyReLU(0.1)

        self.rnn = LSTM(hidden_dim, hidden_dim, output_dim, num_layers, dropout=dropout, bidirectional=bidirectional)
        self._affine = Affine(output_dim)
        self._dropout = nn.Dropout(0.2)

        self.name = 'cnnLSTMaffine'

    def forward(self, x):
        # (m, K, C) -> (m, C, K) for the Conv1d stack.
        out = x.transpose(1, 2)
        for conv in (self.conv1, self.conv2, self.conv3):
            out = self.activation(conv(out))
        # Back to (m, K, C) for the sequence model and elementwise head.
        out = out.transpose(1, 2)

        out = self.rnn(out)
        out = self._affine(out)
        return self._dropout(out)


###-------------------------------


class Conv1dGRUCell(nn.Module):
    """Single convolutional GRU cell operating on (N, C, W) tensors.

    :param dim_in: int
        Input sequence length (W); used only to size the initial hidden state.
    :param in_channels: int
        Number of channels of the input tensor.
    :param out_channels: int
        Number of channels of the output tensor as well as the hidden state.
    :param kernel_size: int
        Size of the convolutional kernel; "same" padding is derived from it.
    :param bias: bool
        Whether or not to include the bias.
    """

    def __init__(self, dim_in, in_channels, out_channels, kernel_size, bias):
        super(Conv1dGRUCell, self).__init__()
        self.dim_in = dim_in
        self.padding = kernel_size // 2
        self.out_channels = out_channels
        self.bias = bias

        # Fused projections: each conv emits the r, z, n pre-activations at once.
        self.i2h_conv = nn.Conv1d(in_channels, out_channels * 3, kernel_size, 1, self.padding, bias=bias)
        self.h2h_conv = nn.Conv1d(out_channels, out_channels * 3, kernel_size, 1, self.padding, bias=bias)

    def init_hidden(self, batch_size, device):
        """Return a zero hidden state of shape (batch, out_channels, dim_in)."""
        return torch.zeros(batch_size, self.out_channels, self.dim_in, device=device)

    def forward(self, x, h_prev):
        """
        :param x: (N, C_in, W)
            input tensor
        :param h_prev: (N, C_hidden, W)
            hidden state of the previous step
        :return: (N, C_hidden, W)
            hidden state of the next step
        """
        ri, zi, ni = torch.split(self.i2h_conv(x), self.out_channels, dim=1)
        rh, zh, nh = torch.split(self.h2h_conv(h_prev), self.out_channels, dim=1)

        reset = torch.sigmoid(ri + rh)
        update = torch.sigmoid(zi + zh)
        candidate = torch.tanh(ni + reset * nh)

        # Convex combination of candidate and previous state.
        return (1 - update) * candidate + update * h_prev


class Conv1dGRU(nn.Module):
    """Stack of Conv1dGRUCell layers unrolled over time.

    :param dim_in: int
        Input sequence length (W).
    :param in_channels: int
        Number of channels of the input tensor.
    :param out_channels: int or list[int]
        Channels of the hidden state per layer (an int is broadcast to all layers).
    :param kernel_size: int
        Size of the convolutional kernel.
    :param num_layers: int
        Number of ConvGRU layers.
    :param bias: bool
        Whether or not to add the bias.
    """

    def __init__(self, dim_in, in_channels, out_channels, kernel_size, num_layers, bias):
        super(Conv1dGRU, self).__init__()

        if isinstance(out_channels, int):
            out_list = [out_channels] * num_layers
        else:
            assert isinstance(out_channels, list) and len(out_channels) == num_layers
            out_list = out_channels
        # Each layer consumes the previous layer's channels.
        in_list = [in_channels] + out_list[:-1]

        if len(in_list) != num_layers:
            raise ValueError("Inconsistent list length.")

        self.cell_list = nn.ModuleList(
            Conv1dGRUCell(dim_in, cin, cout, kernel_size, bias)
            for cin, cout in zip(in_list, out_list)
        )
        self.num_layers = num_layers

    def forward(self, input_tensor):
        """
        :param input_tensor: (T, N, C_in, W)
            time-major input tensor.
        :return output_tensor: (T, N, C_out, W)
            hidden states of the last layer at every time step.
        """
        # Batch size is dim 1 of the time-major input.
        hidden_states = self._init_hidden(input_tensor.size(1), input_tensor.device)

        # Layer by layer: run the cell across time, feed outputs to the next layer.
        steps = torch.unbind(input_tensor, dim=0)
        for cell, h in zip(self.cell_list, hidden_states):
            outputs = []
            for frame in steps:
                h = cell(frame, h)
                outputs.append(h)
            steps = outputs
        return torch.stack(outputs, dim=0)

    def _init_hidden(self, batch_size, device):
        """Zero initial hidden state for every layer."""
        return [cell.init_hidden(batch_size, device) for cell in self.cell_list]

    def initialize(self):
        """Xavier init for input-to-hidden weights, orthogonal for hidden-to-hidden."""
        for name, param in self.named_parameters():
            if 'i2h' in name:
                if 'weight' in name:
                    nn.init.xavier_uniform_(param)
                if 'bias' in name:
                    nn.init.zeros_(param)
            if 'h2h' in name:
                if 'weight' in name:
                    nn.init.orthogonal_(param)
                if 'bias' in name:
                    nn.init.zeros_(param)
####-----------------------------------------



class convGRU(Conv1dGRU):
    """Benchmark convGRU: a Conv1dGRU wrapped for (B, W, C) batch-major tensors.

    :param dim_in: int
        Input sequence length (W).
    :param in_channels: int
        Number of channels of the input tensor.
    :param out_channels: int
        Number of channels of the output tensor as well as the hidden state.
    :param kernel_size: int
        Size of the convolutional kernel.
    :param num_layers: int
        Number of ConvGRU layers.
    :param bias: bool
        Whether or not to add the bias. Default: 'True'.

    NOTE(review): super().__init__ already builds a cell stack, yet forward
    only uses the separately constructed `self.rnn` — the inherited cells
    are redundant parameters, kept here for checkpoint compatibility.
    """

    def __init__(self,
                 dim_in: int,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: int,
                 num_layers: int,
                 bias: bool = True):
        super().__init__(dim_in, in_channels, out_channels, kernel_size, num_layers, bias)
        self.rnn = Conv1dGRU(dim_in, in_channels, out_channels, kernel_size, num_layers, bias=True)

        self.name = 'convGRU'

    def forward(self, input_tensor):
        """
        :param input_tensor: (B, W, C_in)
            batch-major input tensor.
        :return output_tensor: (B, W, C_out)
            output tensor.
        """
        # (B, W, C) -> (B, C, W), then prepend a length-1 leading axis so the
        # time-major Conv1dGRU treats the batch as N.
        x = input_tensor.permute(0, 2, 1)
        x = torch.unsqueeze(x, 0)
        out = self.rnn(x)
        # Drop the leading axis and restore (B, W, C_out).
        out = torch.squeeze(out, 0)
        return out.permute(0, 2, 1)

## examples
# rnn = convGRU(128, 3, 6, 11, 2, True)
# input = torch.randn(20, 128, 3)
# output  = rnn(input)


class convGRUaffine(Conv1dGRU):
    """convGRU variant with an Affine residual head on the output.

    :param dim_in: int
        Input sequence length (W).
    :param in_channels: int
        Number of channels of the input tensor.
    :param out_channels: int
        Number of channels of the output tensor as well as the hidden state.
    :param kernel_size: int
        Size of the convolutional kernel.
    :param num_layers: int
        Number of ConvGRU layers.
    :param bias: bool
        Whether or not to add the bias. Default: 'True'.

    NOTE(review): super().__init__ already builds a cell stack, yet forward
    only uses the separately constructed `self.rnn` — the inherited cells
    are redundant parameters, kept here for checkpoint compatibility.
    Affine's default K=128 also ties this head to W == 128 (or 1).
    """

    def __init__(self,
                 dim_in: int,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: int,
                 num_layers: int,
                 bias: bool = True):
        super().__init__(dim_in, in_channels, out_channels, kernel_size, num_layers, bias)
        self.rnn = Conv1dGRU(dim_in, in_channels, out_channels, kernel_size, num_layers, bias=True)

        self._affine = Affine(out_channels)

        self.name = 'convGRUaffine'

    def forward(self, input_tensor):
        """
        :param input_tensor: (B, W, C_in)
            batch-major input tensor.
        :return output_tensor: (B, W, C_out)
            output tensor after the Affine head.
        """
        # (B, W, C) -> (B, C, W), then prepend a length-1 leading axis so the
        # time-major Conv1dGRU treats the batch as N.
        x = input_tensor.permute(0, 2, 1)
        x = torch.unsqueeze(x, 0)
        out = self.rnn(x)
        # Drop the leading axis and restore (B, W, C_out).
        out = torch.squeeze(out, 0)
        out = out.permute(0, 2, 1)

        return self._affine(out)




class convLSTMCell(nn.Module):
    """Single ConvLSTM cell over (N, C, H, W) feature maps.

    Parameters:
    input_dim: int
        Number of channels of input tensor.
    hidden_dim: int
        Number of channels of hidden state.
    kernel_size: (int, int)
        Size of the convolutional kernel.
    bias: bool
        Whether or not to add the bias.

    NOTE(review): conv2, conv3 and `activation` are constructed but never
    used in forward (the extra conv stages are disabled); they are kept so
    existing checkpoints still load.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, bias):
        super(convLSTMCell, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        self.kernel_size = kernel_size
        # "Same" padding per spatial dimension.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias

        # Fused gate projection: input and hidden state are concatenated on
        # the channel axis and mapped to all four gate pre-activations at once.
        self.conv1 = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                               out_channels=4 * self.hidden_dim,
                               kernel_size=self.kernel_size,
                               padding=self.padding,
                               bias=self.bias)
        # Unused extra stages (see class NOTE).
        self.conv2 = nn.Conv2d(in_channels=4 * self.hidden_dim,
                               out_channels=4 * self.hidden_dim,
                               kernel_size=self.kernel_size,
                               padding=self.padding,
                               bias=self.bias)
        self.conv3 = nn.Conv2d(in_channels=4 * self.hidden_dim,
                               out_channels=4 * self.hidden_dim,
                               kernel_size=self.kernel_size,
                               padding=self.padding,
                               bias=self.bias)

        self.activation = nn.LeakyReLU(0.1)

    def forward(self, input_tensor, cur_state):
        """One LSTM step: (input, (h, c)) -> (h_next, c_next)."""
        h_cur, c_cur = cur_state

        # Concatenate along the channel axis and compute all gates in one conv.
        stacked = torch.cat([input_tensor, h_cur], dim=1)
        gates = self.conv1(stacked)
        cc_i, cc_f, cc_o, cc_g = torch.split(gates, self.hidden_dim, dim=1)

        in_gate = torch.sigmoid(cc_i)
        forget_gate = torch.sigmoid(cc_f)
        out_gate = torch.sigmoid(cc_o)
        cell_input = torch.tanh(cc_g)

        c_next = forget_gate * c_cur + in_gate * cell_input
        h_next = out_gate * torch.tanh(c_next)

        return h_next, c_next

    def init_hidden(self, batch_size, image_size):
        """Zero (h, c) pair sized (batch, hidden_dim, height, width)."""
        height, width = image_size
        device = self.conv1.weight.device
        return (torch.zeros(batch_size, self.hidden_dim, height, width, device=device),
                torch.zeros(batch_size, self.hidden_dim, height, width, device=device))


class convLSTM(nn.Module):

    """Stacked ConvLSTM built from convLSTMCell layers.

    Parameters:
        input_dim: Number of channels in input
        hidden_dim: Number of hidden channels (int, or list of length num_layers)
        kernel_size: Size of kernel in convolutions (tuple, or list of tuples)
        num_layers: Number of LSTM layers stacked on each other
        batch_first: Whether or not dimension 0 is the batch or not
        bias: Bias or no bias in Convolution
        return_all_layers: Return the list of computations for all layers
        Note: Will do same padding.
    Input:
        forward() takes a 3-D tensor and reshapes it internally to the 5-D
        (t, b, c, h, w) / (b, t, c, h, w) layout the cells expect — see forward.
    Output:
        A tuple of two lists of length num_layers (or length 1 if return_all_layers is False).
            0 - layer_output_list is the list of lists of length T of each output
            1 - last_state_list is the list of last states
                    each element of the list is a tuple (h, c) for hidden state and memory
    Example:
        >> x = torch.rand((32, 10, 64, 128, 128))
        >> convlstm = ConvLSTM(64, 16, 3, 1, True, True, False)
        >> _, last_states = convlstm(x)
        >> h = last_states[0][0]  # 0 for layer index, 0 for h index
        NOTE(review): this example predates the 3-D reshaping in forward
        (a 5-D input would fail the permute(0,2,1)) — confirm intended usage.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(convLSTM, self).__init__()

        # kernel_size must be a tuple or a list of tuples (raises otherwise).
        self._check_kernel_size_consistency(kernel_size)

        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers

        cell_list = []

        # Layer i consumes the hidden channels of layer i-1 (input_dim for layer 0).
        for i in range(0, self.num_layers):
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]

            cell_list.append(convLSTMCell(input_dim=cur_input_dim, hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i], bias=self.bias))

        self.cell_list = nn.ModuleList(cell_list)

    def forward(self, input_tensor, hidden_state=None):
        """Run the ConvLSTM stack over the input sequence.

        Parameters:
        input_tensor:
            3-D tensor, reshaped below to a 5-D (t, b, c, h, w) / (b, t, c, h, w)
            layout; presumably (B, W, C) — TODO confirm with callers.
        hidden_state:
            Must be None; stateful operation is not implemented (raises otherwise).
        Returns:
        layer_output_list, last_state_list (see class docstring).
        """
        # (B, W, C) -> (B, C, W), then add singleton height and leading time axes:
        # result is (1, B, C, 1, W).
        input_tensor = input_tensor.permute(0,2,1)
        input_tensor = torch.unsqueeze(input_tensor, 2)
        input_tensor = torch.unsqueeze(input_tensor, 0)

        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)

        b, _, _, h, w = input_tensor.size()

        # Implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            # Since the init is done in forward. Can send image size here
            hidden_state = self._init_hidden(batch_size=b, image_size=(h, w))

        layer_output_list = []
        last_state_list = []

        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor

        # Unroll each layer over time; its stacked outputs feed the next layer.
        for layer_idx in range(self.num_layers):

            h, c = hidden_state[layer_idx]
            output_inner = []
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :], cur_state=[h, c])
                output_inner.append(h)

            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output

            layer_output_list.append(layer_output)
            last_state_list.append([h, c])

        # Keep only the last layer's outputs/state unless all layers were requested.
        if not self.return_all_layers:
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]

        return layer_output_list, last_state_list

    def _init_hidden(self, batch_size, image_size):
        # Zero (h, c) pairs, one per layer, sized by each cell's hidden_dim.
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        # Accept a single tuple or a list of tuples; anything else is rejected.
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        # Broadcast a scalar setting to one entry per layer.
        if not isinstance(param, list):
            param = [param] * num_layers
        return param


# if __name__ == "__main__":
    # os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"
    # torch.backends.cudnn.benchmark = True

    # x = torch.rand((1, 12, 3))
    # convlstm = convLSTM(3, 2, (1, 11), 3, True, True, False)
    # output, last_state = convlstm(x)
    # h = last_state[0][0]  # 0 for layer index, 0 for h index

    # print(np.array(output, dtype=object).size)
    # print(np.array(output, dtype=object).shape, output)    
    # print(np.array(last_state, dtype=object).shape, last_state)



class convLSTMCellaffine(nn.Module):
    """ConvLSTM cell whose hidden state is post-processed by an Affine block.

    Identical to a plain ConvLSTM cell except that after the recurrence the
    new hidden state is rearranged to channels-last, run through ``Affine``
    (a learned per-channel residual scale/shift), and rearranged back.

    Parameters
    ----------
    input_dim: int
        Number of channels of the input tensor.
    hidden_dim: int
        Number of channels of the hidden state.
    kernel_size: (int, int)
        Size of the convolutional kernel.
    bias: bool
        Whether or not to add a bias to the convolutions.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, bias):
        super(convLSTMCellaffine, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # "same" padding for odd kernel sizes
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias

        def gate_conv(in_channels):
            # One conv producing all four gate pre-activations at once.
            return nn.Conv2d(in_channels=in_channels,
                             out_channels=4 * self.hidden_dim,
                             kernel_size=self.kernel_size,
                             padding=self.padding,
                             bias=self.bias)

        self.conv1 = gate_conv(self.input_dim + self.hidden_dim)
        # NOTE(review): conv2/conv3 are registered but never called in
        # forward(); kept so parameter/state-dict layout stays unchanged.
        self.conv2 = gate_conv(4 * self.hidden_dim)
        self.conv3 = gate_conv(4 * self.hidden_dim)

        self.activation = nn.LeakyReLU(0.2)

        self._affine = Affine(hidden_dim)

    def forward(self, input_tensor, cur_state):
        """One recurrence step; returns the next (h, c) pair."""
        prev_h, prev_c = cur_state

        # Stack input and previous hidden state along the channel axis.
        stacked = torch.cat([input_tensor, prev_h], dim=1)
        gates = self.activation(self.conv1(stacked))

        # chunk(4) over 4*hidden_dim channels == split into hidden_dim pieces.
        cc_i, cc_f, cc_o, cc_g = gates.chunk(4, dim=1)

        in_gate = torch.sigmoid(cc_i)
        forget_gate = torch.sigmoid(cc_f)
        out_gate = torch.sigmoid(cc_o)
        candidate = torch.tanh(cc_g)

        c_next = forget_gate * prev_c + in_gate * candidate
        h_next = out_gate * torch.tanh(c_next)

        # Rearrange (1, C, H, W) -> (H, W, C) for the channels-last affine,
        # then back.  NOTE(review): squeeze(0)/permute(1, 2, 0) only works
        # for batch size 1 -- confirm callers never pass larger batches.
        h_flat = h_next.squeeze(0).permute(1, 2, 0)
        h_flat = self._affine(h_flat)
        h_next = h_flat.permute(2, 0, 1).unsqueeze(0)

        return h_next, c_next

    def init_hidden(self, batch_size, image_size):
        """Zero-filled (h, c) tensors on the same device as the weights."""
        height, width = image_size
        device = self.conv1.weight.device
        shape = (batch_size, self.hidden_dim, height, width)
        return torch.zeros(*shape, device=device), torch.zeros(*shape, device=device)


class convLSTMaffine(nn.Module):
    """Stacked ConvLSTM built from ``convLSTMCellaffine`` cells.

    Parameters:
        input_dim: Number of channels in input
        hidden_dim: Number of hidden channels (int, or list with one entry per layer)
        kernel_size: Size of kernel in convolutions (tuple, or list of tuples per layer)
        num_layers: Number of LSTM layers stacked on each other
        batch_first: Whether or not dimension 0 is the batch or not
        bias: Bias or no bias in Convolution
        return_all_layers: Return the list of computations for all layers
        Note: Will do same padding.
    Input:
        forward() takes a 3-D tensor that is internally reshaped into the
        5-D (b, t, c, h, w) layout the cells expect -- see forward().
    Output:
        A tuple of two lists of length num_layers (or length 1 if
        return_all_layers is False):
            0 - layer_output_list: per-layer tensors of stacked hidden states
            1 - last_state_list: per-layer final [h, c] pairs
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(convLSTMaffine, self).__init__()

        self._check_kernel_size_consistency(kernel_size)

        # Broadcast scalar settings so every layer has its own entry.
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers

        # Layer 0 consumes the raw input; deeper layers consume the
        # previous layer's hidden channels.
        self.cell_list = nn.ModuleList(
            convLSTMCellaffine(
                input_dim=self.input_dim if i == 0 else self.hidden_dim[i - 1],
                hidden_dim=self.hidden_dim[i],
                kernel_size=self.kernel_size[i],
                bias=self.bias,
            )
            for i in range(self.num_layers)
        )

    def forward(self, input_tensor, hidden_state=None):
        """Run the stacked cells over the whole sequence.

        Parameters
        ----------
        input_tensor:
            3-D tensor, reshaped below into the 5-D (b, t, c, h, w) layout.
            NOTE(review): the reshape assumes the caller passes (b, t, c)
            -- confirm against call sites.
        hidden_state:
            Must be None; stateful operation is not implemented.

        Returns
        -------
        (layer_output_list, last_state_list)
        """
        # (b, t, c) -> (b, c, t) -> (b, c, 1, t) -> (1, b, c, 1, t)
        input_tensor = input_tensor.permute(0, 2, 1)
        input_tensor = input_tensor.unsqueeze(2).unsqueeze(0)

        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)

        batch, _, _, height, width = input_tensor.size()

        if hidden_state is not None:
            # TODO: implement stateful ConvLSTM.
            raise NotImplementedError()
        # Init is done here because the spatial size is only known now.
        hidden_state = self._init_hidden(batch_size=batch,
                                         image_size=(height, width))

        seq_len = input_tensor.size(1)
        layer_input = input_tensor

        layer_output_list = []
        last_state_list = []
        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]

            step_outputs = []
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](
                    input_tensor=layer_input[:, t, :, :, :],
                    cur_state=[h, c],
                )
                step_outputs.append(h)

            # Time-stacked hidden states feed the next layer up.
            layer_input = torch.stack(step_outputs, dim=1)
            layer_output_list.append(layer_input)
            last_state_list.append([h, c])

        if not self.return_all_layers:
            # Keep only the top layer's outputs and final state.
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]

        return layer_output_list, last_state_list

    def _init_hidden(self, batch_size, image_size):
        """Initial zero (h, c) pair for every layer, built by each cell."""
        return [self.cell_list[i].init_hidden(batch_size, image_size)
                for i in range(self.num_layers)]

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        """Raise ValueError unless kernel_size is a tuple or a list of tuples."""
        is_single = isinstance(kernel_size, tuple)
        is_per_layer = (isinstance(kernel_size, list)
                        and all(isinstance(elem, tuple) for elem in kernel_size))
        if not (is_single or is_per_layer):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        """Broadcast a scalar setting to a per-layer list; lists pass through unchanged."""
        if isinstance(param, list):
            return param
        return [param] * num_layers



class FFN(nn.Module):
    """Plain stack of Linear layers applied in sequence.

    Widths go input_dim -> hidden_dim (repeated) -> output_dim, yielding
    ``num_layers - 1`` Linear layers.

    NOTE(review): ``dropout`` is accepted but never used, and there is no
    nonlinearity between the layers (so the stack collapses to a single
    linear map) -- confirm this is intentional.
    """

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int,
                 dropout: float = 0,
                 **kwargs):
        super().__init__(**kwargs)

        # Per-layer widths: first entry is the input width, last the output.
        dims = [hidden_dim] * num_layers
        dims[0] = input_dim
        dims[-1] = output_dim

        self.layers_dense = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(dims, dims[1:])
        )

        self.name = 'FFN'

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Feed x through every Linear layer in order."""
        for dense in self.layers_dense:
            x = dense(x)
        return x


