import torch
import torch.nn as nn

from models.component.Layer import ConvBlock, ResBlock


class Encoder(nn.Module):
    """Feature-pyramid encoder.

    Ten ConvBlocks widen channels 16 -> 256 while halving spatial size
    every other layer (256 -> 128 -> 64 -> 32 -> 16 for 256x256 input).
    Output spatial size follows W = (W - F + 2P) / S + 1.

    forward() returns:
      * a (B, 992, 16, 16) volume: every intermediate feature map
        adaptively average-pooled to 16x16 and concatenated on the
        channel axis (16+16+32+32+64+64+128+128+256+256 = 992);
      * a list of the first nine un-pooled feature maps, used as skip
        connections by the Decoder (the deepest map is excluded).
    """

    def __init__(self, in_channels=3, spec_norm=False, LR=0.2):
        super(Encoder, self).__init__()

        # Attribute names layer1..layer10 are kept explicit so existing
        # checkpoints (state_dict keys) remain loadable.
        self.layer1 = ConvBlock(in_channels, 16, spec_norm, LR=LR)  # 256
        self.layer2 = ConvBlock(16, 16, spec_norm, LR=LR)  # 256
        self.layer3 = ConvBlock(16, 32, spec_norm, stride=2, LR=LR)  # 128
        self.layer4 = ConvBlock(32, 32, spec_norm, LR=LR)  # 128
        self.layer5 = ConvBlock(32, 64, spec_norm, stride=2, LR=LR)  # 64
        self.layer6 = ConvBlock(64, 64, spec_norm, LR=LR)  # 64
        self.layer7 = ConvBlock(64, 128, spec_norm, stride=2, LR=LR)  # 32
        self.layer8 = ConvBlock(128, 128, spec_norm, LR=LR)  # 32
        self.layer9 = ConvBlock(128, 256, spec_norm, stride=2, LR=LR)  # 16
        self.layer10 = ConvBlock(256, 256, spec_norm, LR=LR)  # 16
        self.down_sampling = nn.AdaptiveAvgPool2d((16, 16))

    def forward(self, x):
        """Return (pooled concat volume, list of 9 skip feature maps)."""
        # Run the conv stack, keeping every intermediate activation.
        feats = []
        out = x
        for i in range(1, 11):
            out = getattr(self, f"layer{i}")(out)
            feats.append(out)

        # Pool the first eight maps to 16x16 so they can be concatenated
        # with the last two maps, which are already 16x16.
        pooled = [self.down_sampling(f) for f in feats[:8]]
        output = torch.cat(pooled + feats[8:], dim=1)  # (B, 992, 16, 16)

        # Skip connections for the decoder, shallow -> deep.
        feature_list = feats[:9]
        return output, feature_list


class ResBlockNet(nn.Module):
    """Stack of four ResBlocks.

    The first block maps in_channels -> out_channels; the remaining
    three keep the width at out_channels.
    """

    def __init__(self, in_channels, out_channels):
        super(ResBlockNet, self).__init__()
        blocks = [ResBlock(in_channels, out_channels)]
        blocks.extend(ResBlock(out_channels, out_channels) for _ in range(3))
        self.main = nn.Sequential(*blocks)

    def forward(self, x):
        return self.main(x)


class Discriminator(nn.Module):
    """PatchGAN-style discriminator.

    Four stride-2 ConvBlocks (inplane -> 16 -> 32 -> 64 -> 128) shrink
    a 256x256 input to 16x16, followed by a 3x3 conv producing a
    1-channel patch score map. W = (W - F + 2P) / S + 1.
    """

    def __init__(self, inplane, spec_norm=True, LR=0.2):
        super(Discriminator, self).__init__()
        channel_plan = [(inplane, 16), (16, 32), (32, 64), (64, 128)]
        layers = [
            ConvBlock(cin, cout, spec_norm, stride=2, LR=LR)
            for cin, cout in channel_plan
        ]
        layers.append(nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1))
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        return self.main(x)


# class SDiscriminator(nn.Module):
#     def __init__(self, spec_norm=True, LR=0.2):
#         super().__init__()
#         self.main = nn.Sequential(
#             ConvBlock(4, 16, spec_norm, stride=2, LR=LR),
#             ConvBlock(16, 32, spec_norm, stride=2, LR=LR),
#             ConvBlock(32, 64, spec_norm, stride=2, LR=LR),
#             ConvBlock(64, 128, spec_norm, stride=2, LR=LR),
#             nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1)
#         )
#
#     def forward(self, img, sketch):
#         x = torch.cat([img, sketch], dim=1)
#         return self.main(x)


# class CDiscriminator(nn.Module):
#     def __init__(self, color_nums, spec_norm=True, LR=0.2):
#         super(CDiscriminator, self).__init__()
#         self.mlp = nn.Sequential(
#             nn.Linear(color_nums * 3, color_nums),
#             nn.ReLU(inplace=True),
#             nn.Linear(color_nums, 256),
#             nn.ReLU(inplace=True),
#         )
#         self.main = nn.Sequential(
#             ConvBlock(3, 16, spec_norm, stride=2, LR=LR),
#             ConvBlock(16, 32, spec_norm, stride=2, LR=LR),
#             ConvBlock(32, 64, spec_norm, stride=2, LR=LR),
#             ConvBlock(64, 128, spec_norm, stride=2, LR=LR),
#             nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1)
#         )
#
#         self.score = nn.Sequential(
#             nn.Linear(512,256),
#             nn.ReLU(inplace=True),
#             nn.Linear(256,64),
#             nn.ReLU(inplace=True),
#             nn.Linear(64, 1),
#             nn.ReLU(inplace=True),
#         )
#
#     def forward(self, img, pallete):
#         bs = pallete.shape[0]
#         pallete = torch.reshape(pallete,shape=(bs,-1))
#         plt = self.mlp(pallete)
#         main = self.main(img)
#         bs = main.shape[0]
#         main = torch.reshape(main, (bs,-1))
#         x = torch.cat([plt,main],dim=1)
#         return self.score(x)


class Decoder(nn.Module):
    """U-Net-style decoder.

    Consumes the 992-channel 16x16 fused encoding plus the nine encoder
    skip maps (shallow -> deep) and upsamples back to a 3-channel
    256x256 image in [-1, 1] via tanh.  Even-numbered layers
    (8/6/4/2, up=True) double the spatial size and receive the skip map
    as a second argument; odd-numbered layers consume a channel-wise
    concatenation with the next skip map.
    """

    def __init__(self, spec_norm=False, LR=0.2):
        super(Decoder, self).__init__()
        self.layer10 = ConvBlock(992, 256, spec_norm, LR=LR)  # 16->16
        self.layer9 = ConvBlock(256 + 256, 256, spec_norm, LR=LR)  # 16->16
        self.layer8 = ConvBlock(256 + 128, 128, spec_norm, LR=LR, up=True)  # 16->32
        self.layer7 = ConvBlock(128 + 128, 128, spec_norm, LR=LR)  # 32->32
        self.layer6 = ConvBlock(128 + 64, 64, spec_norm, LR=LR, up=True)  # 32->64
        self.layer5 = ConvBlock(64 + 64, 64, spec_norm, LR=LR)  # 64->64
        self.layer4 = ConvBlock(64 + 32, 32, spec_norm, LR=LR, up=True)  # 64->128
        self.layer3 = ConvBlock(32 + 32, 32, spec_norm, LR=LR)  # 128->128
        self.layer2 = ConvBlock(32 + 16, 16, spec_norm, LR=LR, up=True)  # 128->256
        self.layer1 = ConvBlock(16 + 16, 16, spec_norm, LR=LR)  # 256->256
        self.last_conv = nn.Conv2d(16, 3, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()

    def forward(self, x, feature_list):
        # skip[0] is the shallowest encoder map, skip[8] the deepest.
        skip = feature_list
        d10 = self.layer10(x)
        d9 = self.layer9(torch.cat((d10, skip[8]), dim=1))
        d8 = self.layer8(d9, skip[7])   # up: 16 -> 32
        d7 = self.layer7(torch.cat((d8, skip[6]), dim=1))
        d6 = self.layer6(d7, skip[5])   # up: 32 -> 64
        d5 = self.layer5(torch.cat((d6, skip[4]), dim=1))
        d4 = self.layer4(d5, skip[3])   # up: 64 -> 128
        d3 = self.layer3(torch.cat((d4, skip[2]), dim=1))
        d2 = self.layer2(d3, skip[1])   # up: 128 -> 256
        d1 = self.layer1(torch.cat((d2, skip[0]), dim=1))
        return self.tanh(self.last_conv(d1))


class Generator(nn.Module):
    """Reference-guided generator: two encoders, a ResBlock fuser, one decoder.

    The reference image is concatenated with Gaussian noise (3+3 = 6
    input channels); the sketch encoder additionally supplies the skip
    connections used by the decoder.
    """

    def __init__(self, spec_norm=False, LR=0.2):
        super(Generator, self).__init__()
        self.encoder_reference = Encoder(in_channels=6, spec_norm=spec_norm, LR=LR)
        self.encoder_sketch = Encoder(in_channels=1, spec_norm=spec_norm, LR=LR)
        self.decoder = Decoder()
        self.res_model = ResBlockNet(992 * 2, 992)

    def forward(self, reference, sketch):
        # Inject per-call noise alongside the reference for stochasticity.
        ref_in = torch.cat([reference, torch.randn_like(reference)], dim=1)
        v_r, _ = self.encoder_reference(ref_in)
        v_s, skips = self.encoder_sketch(sketch)
        fused = self.res_model(torch.cat([v_r, v_s], dim=1))
        return self.decoder(fused, skips)


if __name__ == '__main__':
    # Smoke test: push a random batch through the generator and check
    # the output shape.  (Removed: unused `pallete`/`colors_nums` left
    # over from the commented-out CDiscriminator, and renamed `input`,
    # which shadowed the builtin.)
    batch_size = 2
    reference = torch.rand(size=(batch_size, 3, 256, 256))
    sketch = torch.rand(size=(batch_size, 1, 256, 256))
    g = Generator()
    fake = g(reference, sketch)
    print(fake.shape)  # expected: torch.Size([2, 3, 256, 256])