import torch
import torch.nn as nn


class encoder(nn.Module):
    def __init__(self):
        super(encoder, self).__init__()

        self.block0 = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1, stride=1))
        self.block0.add_module('relu0_1', nn.ReLU(inplace=True))

        self.block1 = nn.Sequential()
        self.block1.add_module('conv1_1', nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1, stride=1))
        self.block1.add_module('relu1_1', nn.ReLU(inplace=True))
        self.block1.add_module('maxpool_1', nn.MaxPool2d(kernel_size=2, stride=2))

        self.block2 = nn.Sequential()
        self.block2.add_module('conv2_1', nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1))
        self.block2.add_module('relu2_1', nn.ReLU(inplace=True))
        self.block2.add_module('conv2_2', nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1))
        self.block2.add_module('relu2_2', nn.ReLU(inplace=True))
        self.block2.add_module('maxpool_2', nn.MaxPool2d(kernel_size=2, stride=2))

        self.block3 = nn.Sequential()
        self.block3.add_module('conv3_1', nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block3.add_module('relu3_1', nn.ReLU(inplace=True))
        self.block3.add_module('conv3_2', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block3.add_module('relu3_2', nn.ReLU(inplace=True))
        self.block3.add_module('conv3_3', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block3.add_module('relu3_3', nn.ReLU(inplace=True))
        self.block3.add_module('maxpool_3', nn.MaxPool2d(kernel_size=2, stride=2))

        self.block4 = nn.Sequential()
        self.block4.add_module('conv4_1', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block4.add_module('relu4_1', nn.ReLU(inplace=True))
        self.block4.add_module('conv4_2', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block4.add_module('relu4_2', nn.ReLU(inplace=True))
        self.block4.add_module('conv4_3', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block4.add_module('relu4_3', nn.ReLU(inplace=True))
        self.block4.add_module('maxpool_4', nn.MaxPool2d(kernel_size=2, stride=2))

        self.block5 = nn.Sequential()
        self.block5.add_module('conv5_1', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block5.add_module('relu5_1', nn.ReLU(inplace=True))
        self.block5.add_module('conv5_2', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block5.add_module('relu5_2', nn.ReLU(inplace=True))
        self.block5.add_module('conv5_3', nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1))
        self.block5.add_module('relu5_3', nn.ReLU(inplace=True))
        self.block5.add_module('maxpool_5', nn.MaxPool2d(kernel_size=2, stride=2))

    def forward(self, x):

        output0 = self.block0(x)
        output1 = self.block1(output0)
        output2 = self.block2(output1)
        output3 = self.block3(output2)
        output4 = self.block4(output3)
        output5 = self.block5(output4)

        return output5, [x, output0, output1, output2, output3, output4]


class decoder(nn.Module):
    """Upsampling counterpart of `encoder`.

    Five stride-2 transposed convolutions undo the encoder's five max-pools;
    after each step the result is fused with the matching encoder activation
    through a `skip_layer_connection`. A final 1x1 convolution (de_block6 —
    registered under the name 'deconv6' even though it is a plain Conv2d)
    maps back to 3 channels.
    """

    def __init__(self):
        super(decoder, self).__init__()

        self.de_block1 = nn.Sequential()
        self.de_block1.add_module('deconv1', nn.ConvTranspose2d(512, 512, stride=2, kernel_size=4, padding=1))
        # Xavier-init deconv1 in place. (The original first replaced the
        # weight with a same-shape torch.ones Parameter, which this init
        # immediately overwrote — that dead step is removed; torch.ones
        # consumes no RNG, so the resulting weights are unchanged.)
        # NOTE(review): only deconv1 is Xavier-initialised; deconv2-5 keep
        # PyTorch's default init — confirm this asymmetry is intentional.
        torch.nn.init.xavier_uniform_(self.de_block1.deconv1.weight)

        self.de_block2 = nn.Sequential()
        self.de_block2.add_module('deconv2', nn.ConvTranspose2d(512, 512, stride=2, kernel_size=4, padding=1))

        self.de_block3 = nn.Sequential()
        self.de_block3.add_module('deconv3', nn.ConvTranspose2d(512, 256, stride=2, kernel_size=4, padding=1))

        self.de_block4 = nn.Sequential()
        self.de_block4.add_module('deconv4', nn.ConvTranspose2d(256, 128, stride=2, kernel_size=4, padding=1))

        self.de_block5 = nn.Sequential()
        self.de_block5.add_module('deconv5', nn.ConvTranspose2d(128, 64, stride=2, kernel_size=4, padding=1))

        self.de_block6 = nn.Sequential()
        self.de_block6.add_module('deconv6', nn.Conv2d(64, 3, kernel_size=1, bias=False))

        # skipK fuses the de_blockK output with an encoder feature of equal
        # channel count; `channel` is the concatenated width (decoder output
        # channels + matching encoder feature channels).
        self.skip6 = skip_layer_connection(channel=6)
        self.skip5 = skip_layer_connection(channel=128)
        self.skip4 = skip_layer_connection(channel=256)
        self.skip3 = skip_layer_connection(channel=512)
        self.skip2 = skip_layer_connection(channel=1024)
        self.skip1 = skip_layer_connection(channel=1024)

    def forward(self, input_layer, skip_layers):
        """Decode the deepest encoder feature back to image space.

        Args:
            input_layer: deepest encoder feature map (first value returned by
                `encoder.forward`).
            skip_layers: list of intermediate encoder features (second value
                returned by `encoder.forward`), consumed deepest-first via
                negative indexing.

        Returns:
            A 3-channel tensor at the input image resolution.
        """
        x = self.de_block1(input_layer)
        x = self.skip1(x, skip_layers[-1])
        #
        x = self.de_block2(x)
        x = self.skip2(x, skip_layers[-2])
        #
        x = self.de_block3(x)
        x = self.skip3(x, skip_layers[-3])
        #
        x = self.de_block4(x)
        x = self.skip4(x, skip_layers[-4])
        #
        x = self.de_block5(x)
        x = self.skip5(x, skip_layers[-5])
        #
        x = self.de_block6(x)
        x = self.skip6(x, skip_layers[-6])

        return x

class skip_layer_connection(nn.Module):
    """Fuses a decoder feature map with the matching encoder feature map.

    The encoder (skip) feature is passed through log(x^2 + 1/255) — a
    log-intensity transform that stays finite at zero — concatenated with
    the decoder feature along the channel axis, and projected back down to
    `channel // 2` channels by a 1x1 convolution.

    The 1x1 conv weight is initialised to [I | I] with zero bias, so at
    initialisation the output is exactly transformed_skip + decoder_input.
    `channel` is the concatenated width and must be even.
    """

    def __init__(self, channel):
        super(skip_layer_connection, self).__init__()

        half = channel // 2
        self.skip_conv = nn.Conv2d(in_channels=channel, out_channels=half, kernel_size=1)
        # Identity 1x1 kernel of shape (half, half, 1, 1), duplicated along
        # the input-channel axis -> (half, channel, 1, 1): output channel i
        # sums channel i of both concatenated halves.
        eye = torch.eye(half).view(half, half, 1, 1)
        self.skip_conv.weight = torch.nn.Parameter(torch.cat([eye, eye], dim=1))
        self.skip_conv.bias = torch.nn.Parameter(torch.zeros_like(self.skip_conv.bias))
        # NOTE(review): defined but never used in forward(); kept for
        # attribute compatibility.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_layer, skip_layer):
        """Return skip_conv(cat([log(skip^2 + 1/255), input], dim=1))."""
        # Scalar broadcast replaces the original `(1/255) * ones_like(...)`,
        # avoiding a throwaway full-size tensor per call (identical values).
        skip_layer = torch.log(torch.pow(skip_layer, 2) + 1.0 / 255.0)
        concat_layer = torch.cat([skip_layer, input_layer], dim=1)
        output = self.skip_conv(concat_layer)

        return output


def _smoke_test():
    """Shape sanity check: push a random 320x320 image through the pipeline.

    320 is divisible by 2^5, as required by the encoder's five max-pools;
    the decoder should return a (1, 3, 320, 320) tensor.
    """
    enc = encoder()
    dec = decoder()
    x = torch.randn(1, 3, 320, 320)
    latent, skips = enc(x)
    predict = dec(latent, skips)
    print(predict.shape)


# Guard the smoke test so importing this module no longer builds two large
# networks and runs a forward pass as a side effect.
if __name__ == "__main__":
    _smoke_test()
