from collections import OrderedDict

import einops
import torch
from torch import nn


class DCGAN(nn.Module):
    """DCGAN-style generator.

    A latent vector is linearly projected to a small seed feature map,
    which is then up-sampled by ``num_stages`` transposed-convolution
    stages, each multiplying the spatial size by ``stride``, until
    ``output_shape`` is produced.  The final stage ends in ``Tanh``, so
    outputs lie in [-1, 1].
    """

    def __init__(
            self,
            latent_dim=100,
            kernel_size=5,
            stride=2,
            last_channel=64,
            num_stages=5,
            output_shape=(3, 128, 128),
            extra_conv=1,
    ):
        """
        Args:
            latent_dim (int): size of the input noise vector ``z``.
            kernel_size (int): conv kernel edge; must be odd so the derived
                padding scales the spatial size exactly by ``stride``.
            stride (int): spatial up-scaling factor per stage.
            last_channel (int): channels of the finest hidden stage; the
                stage ``i`` stages back from the output uses
                ``last_channel * 2 ** i``.
            num_stages (int): number of up-sampling stages.  Each spatial
                dim of ``output_shape`` must be divisible by
                ``stride ** num_stages``.
            output_shape (tuple): generated image shape as ``(C, H, W)``.
            extra_conv (int): number of same-resolution Conv-BN-LeakyReLU
                refinement layers inserted before each up-sampling layer.
        """
        super().__init__()
        self.latent_dim = latent_dim
        self.output_shape = output_shape
        self.kernel_size = kernel_size
        self.stride = stride
        c, h, w = output_shape
        # Channel count halves each stage while the spatial edge grows by
        # `stride`, keeping edge * channel roughly constant while up-sampling.
        channels = [last_channel * 2 ** i for i in reversed(range(num_stages))] + [c]
        self.channels = channels
        # Seed feature-map shape.  Shape tuples are (C, H, W); the original
        # code swapped h and w here, which broke non-square outputs.  The
        # divisor is `stride ** num_stages` (was a hard-coded 2**num_stages)
        # so strides other than 2 also round-trip to `output_shape`.
        scale = stride ** num_stages
        first_shape = (channels[0], h // scale, w // scale)
        self.first_shape = first_shape
        self.ln = nn.Linear(latent_dim, first_shape[0] * first_shape[1] * first_shape[2])
        # Derived automatically (resolves the old TODO).  For odd kernels this
        # pair makes every ConvTranspose2d scale the spatial size exactly by
        # `stride`:  out = (in-1)*stride - 2*(k//2) + k + (stride-1) = in*stride.
        # Matches the previous hard-coded padding=2, output_padding=1 for the
        # default kernel_size=5, stride=2.
        padding = kernel_size // 2
        output_padding = stride - 1
        seq = []
        for i in range(num_stages):
            # Optional same-resolution refinement convs before up-sampling
            # (range(0) is empty, so no guard is needed when extra_conv == 0).
            for j in range(extra_conv):
                seq.append(
                    (f'conv_{i}_{j}',
                     nn.Conv2d(channels[i], channels[i], kernel_size, padding=padding)))
                seq.append((f'bn_{i}_{j}', nn.BatchNorm2d(channels[i])))
                seq.append((f'leaky_relu_{i}_{j}', nn.LeakyReLU()))
            seq.append((f'up_sample_{i}',
                        nn.ConvTranspose2d(channels[i], channels[i + 1], kernel_size, stride,
                                           padding=padding, output_padding=output_padding)))
            # NOTE(review): BatchNorm on the final output stage (before Tanh)
            # is unusual for DCGAN but is the original behavior — kept as-is.
            seq.append((f'bn_{i}', nn.BatchNorm2d(channels[i + 1])))
            # Module name kept as 'relu_{i}' for state-dict compatibility even
            # though the last stage actually uses Tanh.
            seq.append((f'relu_{i}', nn.ReLU() if i < num_stages - 1 else nn.Tanh()))

        self.up_sampling_seq = nn.Sequential(OrderedDict(seq))

    def forward(self, z):
        """Map a latent batch ``z`` of shape (B, latent_dim) to images of
        shape (B, *output_shape), with values in [-1, 1]."""
        first_img = self.ln(z)
        c, h, w = self.first_shape
        # Reshape the flat projection into the seed feature map; a plain
        # `view` replaces einops.rearrange and drops the extra dependency.
        first_img = first_img.view(first_img.size(0), c, h, w)
        return self.up_sampling_seq(first_img)


class Discriminator(nn.Module):
    """DCGAN-style discriminator.

    Applies ``num_stages`` strided-convolution stages, each dividing the
    spatial size by ``stride`` and doubling the channel count, then
    flattens and maps to a single raw real/fake logit per image (no
    sigmoid is applied).
    """

    def __init__(
            self,
            kernel_size=5,
            stride=2,
            first_channel=64,
            num_stages=5,
            input_shape=(3, 128, 128),
            extra_conv=1,
    ):
        """
        Args:
            kernel_size (int): conv kernel edge; must be odd so the derived
                padding divides the spatial size exactly by ``stride``.
            stride (int): spatial down-scaling factor per stage.
            first_channel (int): channels of the first stage; stage ``i``
                uses ``first_channel * 2 ** i``.
            num_stages (int): number of down-sampling stages.  Each spatial
                dim of ``input_shape`` must be divisible by
                ``stride ** num_stages``.
            input_shape (tuple): expected image shape as ``(C, H, W)``.
            extra_conv (int): number of same-resolution Conv-BN-LeakyReLU
                refinement layers inserted before each down-sampling layer.
        """
        super().__init__()
        self.input_shape = input_shape
        c, h, w = input_shape
        self.kernel_size = kernel_size
        self.stride = stride
        # Channel count doubles each stage while the spatial edge shrinks by
        # `stride`, keeping edge * channel roughly constant while down-sampling.
        channels = [c] + [first_channel * 2 ** i for i in range(num_stages)]
        self.channels = channels
        # `stride ** num_stages` (was a hard-coded 2**num_stages) so strides
        # other than 2 also produce a consistent flattened size.
        scale = stride ** num_stages
        self.last_shape = (channels[-1], h // scale, w // scale)
        # Derived automatically (resolves the old TODO).  For odd kernels,
        # padding = k // 2 makes each strided conv divide the spatial size
        # exactly by `stride` when the input is divisible by it:
        # out = (in + 2*(k//2) - k) // stride + 1 = in // stride.
        # Matches the previous hard-coded padding=2 for kernel_size=5.
        padding = kernel_size // 2
        seq = []
        for i in range(num_stages):
            # Optional same-resolution refinement convs before down-sampling
            # (range(0) is empty, so no guard is needed when extra_conv == 0).
            for j in range(extra_conv):
                seq.append(
                    (f'conv_{i}_{j}',
                     nn.Conv2d(channels[i], channels[i], kernel_size, padding=padding)))
                seq.append((f'bn_{i}_{j}', nn.BatchNorm2d(channels[i])))
                seq.append((f'leaky_relu_{i}_{j}', nn.LeakyReLU()))
            seq.append((f'down_sample_{i}',
                        nn.Conv2d(channels[i], channels[i + 1], kernel_size, stride,
                                  padding=padding)))
            seq.append((f'bn_{i}', nn.BatchNorm2d(channels[i + 1])))
            seq.append((f'leaky_relu_{i}', nn.LeakyReLU()))

        self.down_sampling_seq = nn.Sequential(OrderedDict(seq))
        self.ln = nn.Linear(self.last_shape[0] * self.last_shape[1] * self.last_shape[2], 1)

    def forward(self, img):
        """Map an image batch (B, C, H, W) to raw logits of shape (B, 1)."""
        features = self.down_sampling_seq(img)
        # Flatten all non-batch dims; `flatten(1)` replaces einops.rearrange
        # and drops the extra dependency.  (The original also unpacked
        # self.last_shape here, unused.)
        return self.ln(features.flatten(1))


if __name__ == '__main__':
    # Smoke test: generate a batch of fake images and score them.
    latent_batch = torch.randn((64, 100))
    generator = DCGAN()
    fake_images = generator(latent_batch)
    print('generated:', fake_images.shape)

    critic = Discriminator()
    scores = critic(fake_images)
    print('discriminated:', scores.shape)