import torch 
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Mish(nn.Module):
    """Mish activation: f(x) = x * tanh(softplus(x))."""

    def forward(self, x):
        # softplus(x) = ln(1 + e^x); its tanh gates the identity branch.
        gate = torch.tanh(F.softplus(x))
        return x * gate

class BasicConv(nn.Module):
    """Basic conv unit of this UNet: Conv2d (no bias) -> BatchNorm2d -> Mish."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1):
        super(BasicConv, self).__init__()
        # Bias is redundant directly before BatchNorm, so the conv omits it.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, padding, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = Mish()

    def forward(self, x):
        return self.activation(self.bn(self.conv(x)))

class Encoder(nn.Module):
    """One contracting stage: two 3x3 BasicConv blocks, then a 2x2 max-pool.

    forward returns both the pre-pool feature map (kept for the skip
    connection) and the pooled map (input to the next, coarser stage).
    """

    def __init__(self, num_channels, num_filters):
        super(Encoder, self).__init__()
        self.conv1 = BasicConv(num_channels, num_filters,
                               kernel_size=3, stride=1, padding=1)
        self.conv2 = BasicConv(num_filters, num_filters,
                               kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, stride=2)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        return features, self.pool(features)

class Decoder(nn.Module):
    """One expanding stage of the UNet.

    A 2x2 transpose conv (stride 2, halving the channels) doubles the
    spatial size, the result is zero-padded to match the skip feature map,
    the two are concatenated on the channel axis, and two 3x3 BasicConv
    blocks refine the merged features.
    """

    def __init__(self, num_channels, num_filters):
        super(Decoder, self).__init__()
        # 2x2 transpose conv, stride=2: doubles H and W.
        self.up = nn.ConvTranspose2d(num_channels,
                                     num_filters,
                                     kernel_size=2,
                                     stride=2)
        # After concatenation the channel count is num_filters (upsampled)
        # + num_filters (skip) == num_channels, by construction in UNet.
        self.conv1 = BasicConv(num_channels,
                               num_filters,
                               kernel_size=3,
                               stride=1,
                               padding=1)
        self.conv2 = BasicConv(num_filters,
                               num_filters,
                               kernel_size=3,
                               stride=1,
                               padding=1)

    def forward(self, x1, x2):
        """x1: skip features from the encoder; x2: coarser-level features."""
        x = self.up(x2)
        h_diff = x1.shape[2] - x.shape[2]
        w_diff = x1.shape[3] - x.shape[3]
        # F.pad's 2D padding order is (left, right, top, bottom): the WIDTH
        # padding comes first.  BUG FIX: the original passed the height
        # difference into the width slots (and vice versa) of nn.ZeroPad2d,
        # which only worked for square feature maps; non-square inputs were
        # padded on the wrong axes and torch.cat failed.
        x = F.pad(x, [w_diff // 2, w_diff - w_diff // 2,
                      h_diff // 2, h_diff - h_diff // 2])
        x = torch.cat((x1, x), dim=1)
        x = self.conv1(x)
        x = self.conv2(x)

        return x

class UNet(nn.Module):
    """UNet-style encoder/decoder segmentation network.

    encoder: 3 -> 64 -> 128 -> 256 -> 512
    middle:  512 -> 1024 -> 1024 (two 1x1 conv blocks)
    decoder: 1024 -> 512 -> 256 -> 128 -> 64, each stage fed by a skip
    head:    1x1 conv projecting 64 channels to num_class
    """

    def __init__(self, num_class=3):
        super(UNet, self).__init__()
        self.down1 = Encoder(3, 64)
        self.down2 = Encoder(64, 128)
        self.down3 = Encoder(128, 256)
        self.down4 = Encoder(256, 512)

        # Bottleneck at the coarsest resolution.
        self.mid_conv1 = BasicConv(512, 1024, kernel_size=1, padding=0, stride=1)
        self.mid_conv2 = BasicConv(1024, 1024, kernel_size=1, padding=0, stride=1)

        self.up4 = Decoder(1024, 512)
        self.up3 = Decoder(512, 256)
        self.up2 = Decoder(256, 128)
        self.up1 = Decoder(128, 64)

        self.last_conv = nn.Conv2d(in_channels=64,
                                   out_channels=num_class,
                                   kernel_size=1)

    def forward(self, x):
        # Contracting path; keep the pre-pool maps for the skip connections.
        skip1, x = self.down1(x)
        skip2, x = self.down2(x)
        skip3, x = self.down3(x)
        skip4, x = self.down4(x)

        # Middle layers.
        x = self.mid_conv2(self.mid_conv1(x))

        # Expanding path, consuming the skips in reverse order.
        x = self.up4(skip4, x)
        x = self.up3(skip3, x)
        x = self.up2(skip2, x)
        x = self.up1(skip1, x)

        return self.last_conv(x)

def main():
    """Smoke test: push one random 123x123 RGB image through the UNet."""
    use_cuda = False
    model = UNet(num_class=3)
    # NCHW float32 input; the odd spatial size exercises the decoder padding.
    batch = np.random.rand(1, 3, 123, 123).astype(np.float32)
    with torch.no_grad():
        batch = torch.from_numpy(batch)
        if use_cuda:
            batch = batch.cuda()
        pred = model(batch)

    print(pred.shape)


if __name__ == "__main__":
    main()
