import torch
import torch.nn as nn
from models.convlstm import ConvLSTM, ConvLSTMCell
import math


class UnetEncoder(nn.Module):
    """Contracting path of a UNet.

    Runs the input through a stem block and four downsampling stages and
    returns every intermediate feature map so a decoder can build skip
    connections from them.

    NOTE(review): relies on ``DoubleConv`` and ``Down``, which are neither
    defined nor imported in this file — presumably provided elsewhere in
    the project; confirm before running standalone.
    """
    def __init__(self, in_channels):
        super(UnetEncoder, self).__init__()
        self.in_channels = in_channels
        # Stem, then four 2x-downsampling stages: 64 -> 128 -> 256 -> 512 -> 1024 channels.
        self.inc = DoubleConv(in_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024)

    def forward(self, x):
        """Return the full feature pyramid (x1, x2, x3, x4, x5), coarsest last."""
        features = [self.inc(x)]
        for stage in (self.down1, self.down2, self.down3, self.down4):
            features.append(stage(features[-1]))
        return tuple(features)
    

class UnetDecoder(nn.Module):
    """Expanding path of a UNet.

    Consumes the five feature maps produced by ``UnetEncoder`` and
    upsamples back to full resolution, ending with a 1x1 projection to
    ``n_classes`` output channels.

    NOTE(review): relies on ``Up`` and ``OutConv``, which are neither
    defined nor imported in this file — presumably provided elsewhere.
    """
    def __init__(self, in_channels, n_classes, bilinear=False):
        super(UnetDecoder, self).__init__()
        # Channel counts are halved four times, so they must divide cleanly.
        assert in_channels % 16 == 0
        self.in_channels = in_channels
        self.bilinear = bilinear
        self.n_classes = n_classes

        # With bilinear upsampling each stage outputs half the channels it
        # otherwise would (factor == 2), matching the usual UNet variant.
        factor = 2 if bilinear else 1
        self.up1 = Up(in_channels, in_channels//2//factor, bilinear)
        self.up2 = Up(in_channels//2, in_channels//4//factor, bilinear)
        self.up3 = Up(in_channels//4, in_channels//8//factor, bilinear)
        self.up4 = Up(in_channels//8, in_channels//16//factor, bilinear)
        self.outc = OutConv(in_channels//16//factor, n_classes)

    def forward(self, enc_output):
        """Fuse the encoder pyramid stage by stage and return the logits map."""
        x1, x2, x3, x4, x5 = enc_output
        out = x5
        # Each up-stage merges the running output with the matching skip map.
        for up_stage, skip in zip((self.up1, self.up2, self.up3, self.up4),
                                  (x4, x3, x2, x1)):
            out = up_stage(out, skip)
        return self.outc(out)
    
# A res connect
class Residual(nn.Module):
    """Pre-activation bottleneck residual unit.

    Transform branch: ReLU -> 3x3 conv -> BN -> ReLU -> 1x1 conv -> BN,
    added to the identity input. Both convolutions are bias-free because
    each is immediately followed by a BatchNorm.
    """
    def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
        super(Residual, self).__init__()
        spatial_conv = nn.Conv2d(in_channels=in_channels,
                                 out_channels=num_residual_hiddens,
                                 kernel_size=3, stride=1, padding=1, bias=False)
        project_conv = nn.Conv2d(in_channels=num_residual_hiddens,
                                 out_channels=num_hiddens,
                                 kernel_size=1, stride=1, bias=False)
        self.block_ = nn.Sequential(
            nn.ReLU(True),
            spatial_conv,
            nn.BatchNorm2d(num_residual_hiddens),
            nn.ReLU(True),
            project_conv,
            nn.BatchNorm2d(num_hiddens),
        )

    def forward(self, x):
        # Identity skip connection around the transform branch.
        identity = x
        return identity + self.block_(x)


# A Res block
class ResidualStack(nn.Module):
    """Applies ``num_residual_layers`` Residual units in sequence.

    Each unit maps ``in_channels`` -> ``num_hiddens`` with an internal
    bottleneck of ``num_residual_hiddens`` channels.
    """
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(ResidualStack, self).__init__()
        self.num_residual_layers = num_residual_layers
        self.layers_ = nn.ModuleList([Residual(in_channels, num_hiddens, num_residual_hiddens)
                                      for _ in range(self.num_residual_layers)])

    def forward(self, x):
        # Iterate the modules directly rather than indexing by position —
        # same order, clearer intent than `for i in range(...)`.
        for layer in self.layers_:
            x = layer(x)
        return x
        

# The feature extractor of phase2
class FeatureExtractor(nn.Module):
    """Convolutional feature extractor used in phase 2.

    Two stride-2 convolutions shrink the spatial size by 4x in total
    (each 4x4/stride-2/pad-1 conv halves H and W), followed by a stack of
    residual units at ``num_hiddens`` channels. The second conv's ReLU is
    intentionally absent (the residual stack begins with its own ReLU).
    """
    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(FeatureExtractor, self).__init__()
        half_hiddens = num_hiddens // 2
        self.conv1_ = nn.Sequential(
            nn.Conv2d(in_channels=in_channels,
                      out_channels=half_hiddens,
                      kernel_size=4,
                      stride=2,
                      padding=1),
            nn.BatchNorm2d(half_hiddens),
            nn.ReLU(True),
        )
        self.conv2_ = nn.Sequential(
            nn.Conv2d(in_channels=half_hiddens,
                      out_channels=num_hiddens,
                      kernel_size=4,
                      stride=2,
                      padding=1),
            nn.BatchNorm2d(num_hiddens),
        )
        self.residual_stack_ = ResidualStack(in_channels=num_hiddens,
                                            num_hiddens=num_hiddens,
                                            num_residual_layers=num_residual_layers,
                                            num_residual_hiddens=num_residual_hiddens)

    def forward(self, x):
        """Downsample twice, then refine with the residual stack."""
        downsampled = self.conv2_(self.conv1_(x))
        return self.residual_stack_(downsampled)
    

class EncoderLSTM(nn.Module):
    """Bidirectional ConvLSTM sequence encoder.

    Feeds a (B, T, C, H, W) sequence through a bidirectional ConvLSTM and
    returns only the last time step's output map, of shape
    (B, 2 * hidden_channels, H, W) per the comment in the original code.

    NOTE(review): ``num_residual_layers`` and ``residual_hidden_channels``
    are stored but never used by this module; kept to preserve the
    constructor interface.
    """
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 num_residual_layers=2,
                 residual_hidden_channels=128,
                 kernel_size=(3,3),
                 bias=True):
        super(EncoderLSTM, self).__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.bias = bias
        self.num_residual_layers = num_residual_layers
        self.residual_hidden_channels = residual_hidden_channels

        self.convlstm_ = ConvLSTM(in_channels=self.in_channels,
                                  hidden_channels=self.hidden_channels,
                                  kernel_size=self.kernel_size,
                                  bias=self.bias,
                                  batch_first=True,
                                  bidirectional=True)

    def forward(self, x):
        """Encode a (B, T, C, H, W) sequence into the final hidden map."""
        _, _, _, height, width = x.size()
        assert width == height, "Here the local map must be a square"

        # ConvLSTM returns the per-step outputs first; keep only step T-1.
        # Output channels are doubled by the bidirectional pass.
        step_outputs, _, _ = self.convlstm_(x)
        return step_outputs[:, -1]


class DecoderLSTM(nn.Module):
    """Single-step ConvLSTM decoder.

    Each call advances one ConvLSTMCell step, refines the new hidden map
    with a residual stack, and projects it down to ``out_channels`` with
    two 3x3 convolutions.
    """
    def __init__(self,
                 out_channels,
                 num_hiddens,
                 num_residual_layers=2,
                 num_residual_hiddens=64):
        super(DecoderLSTM, self).__init__()
        self.out_channels = out_channels
        self.num_hiddens = num_hiddens
        self.num_residual_layers = num_residual_layers
        self.num_residual_hiddens = num_residual_hiddens

        self.convlstm_ = ConvLSTMCell(in_channels=self.num_hiddens,
                                      hidden_channels=self.num_hiddens,
                                      kernel_size=(3,3),
                                      bias=True)
        self.residual_stack_ = ResidualStack(in_channels=self.num_hiddens,
                                            num_hiddens=self.num_hiddens,
                                            num_residual_layers=self.num_residual_layers,
                                            num_residual_hiddens=self.num_residual_hiddens)
        # Two-stage channel reduction: num_hiddens -> num_hiddens//2 -> out_channels.
        self.conv1_ = nn.Sequential(
            nn.Conv2d(in_channels=self.num_hiddens,
                      out_channels=self.num_hiddens//2,
                      kernel_size=3,
                      stride=1,
                      padding=1),
            nn.Conv2d(in_channels=self.num_hiddens//2,
                      out_channels=self.out_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1)
        )

    def forward(self, x, hidden, cell):
        """Advance one decoding step.

        Returns (output, hidden, cell) so the caller can feed the new
        recurrent state back in on the next step.
        """
        new_hidden, new_cell = self.convlstm_(input_tensor=x,
                                              state=[hidden, cell])
        refined = self.residual_stack_(new_hidden)
        projected = self.conv1_(refined)
        return projected, new_hidden, new_cell


if __name__ == "__main__":
    # Smoke test: push a single-frame, single-channel 64x64 batch through
    # the encoder, then run one decoder step from a zero-initialized state.
    sample = torch.zeros(1, 1, 1, 64, 64)  # (B, T, C, H, W)
    enc = EncoderLSTM(1, 64)
    dec = DecoderLSTM(1, 128)

    # Bidirectional encoder emits 2*64 = 128 channels, matching dec's num_hiddens.
    encoded = enc(sample)
    print(encoded.shape)

    init_state = dec.convlstm_.init_hidden(batch_size=1, image_size=[64, 64])
    step_out, _, _ = dec(encoded, *init_state)
    print(step_out.shape)