from collections import OrderedDict

import einops
import torch
from torch import nn
from torchvision.transforms.functional import gaussian_blur

EPS = 1e-8  # numerical-stability epsilon used by normalization ops (e.g. pixelnorm)


def adaIN(x: torch.Tensor, y):
    """Adaptive instance normalization (Huang & Belongie, 2017).

    Normalizes each channel of ``x`` to zero mean / unit std over its
    spatial dimensions, then rescales by the style scale ``ys`` and
    shifts by the style bias ``yb``.

    Args:
        x: feature map of shape (B, C, H, W).
        y: pair ``(ys, yb)`` of style tensors, each of shape (B, C).

    Returns:
        Styled feature map with the same shape as ``x``.
    """
    assert len(x.shape) == 4, 'not correct feature map'
    assert len(y) == 2, 'not correct y'
    assert y[0].shape[1] == x.shape[1], 'not correct y'
    # according to the article
    ys, yb = y
    # broadcast the per-channel styles over the spatial dims: (B, C) -> (B, C, 1, 1)
    ys = ys[:, :, None, None]
    yb = yb[:, :, None, None]
    x_mean = torch.mean(x, dim=(-2, -1), keepdim=True)
    x_std = torch.std(x, dim=(-2, -1), keepdim=True)
    # epsilon guards against division by zero on constant feature maps
    x = (x - x_mean) / (x_std + 1e-8) * ys + yb
    return x


def pixelnorm(x):
    """Pixel-wise feature normalization (Karras et al., ProGAN/StyleGAN).

    Scales each feature vector (along dim 1) to roughly unit length:
    ``x / sqrt(mean(x**2) + eps)``.

    Bug fix: the original averaged ``x`` instead of ``x**2``; whenever the
    channel mean is negative, ``rsqrt`` of a negative number produces NaNs.
    """
    # 1e-8 matches the module-level EPS constant
    return x * torch.rsqrt(torch.mean(x * x, dim=1, keepdim=True) + 1e-8)


class Z_mapper(nn.Module):
    """Mapping network: turns a latent code ``z`` into a style code ``w``.

    A stack of ``num_stages`` (Linear -> LeakyReLU) layers applied after
    pixel-norming the input, as in the StyleGAN mapping network.
    """

    def __init__(self, latent_dim=64, num_stages=8, leaky_alpha=0.2):
        super(Z_mapper, self).__init__()
        layers = []
        for _ in range(num_stages):
            layers.extend([
                nn.Linear(latent_dim, latent_dim),
                nn.LeakyReLU(leaky_alpha),
            ])
        self.func = nn.Sequential(*layers)

    def forward(self, z):
        # normalize the latent before mapping, per the StyleGAN paper
        return self.func(pixelnorm(z))


class Upsample(nn.Module):
    """Nearest-neighbour upsampling followed by a Gaussian blur.

    Bug fix: the original used ``torch.tile(x, [1, 1, scale, scale])``,
    which repeats the *entire image* in a scale x scale grid (tiling)
    instead of enlarging it. Upsampling must repeat each pixel, which is
    what ``repeat_interleave`` along each spatial dim does.
    """

    def __init__(self, scale=2, blur_size=3):
        super(Upsample, self).__init__()
        self.scale = scale          # spatial enlargement factor per dim
        self.blur_size = blur_size  # Gaussian kernel size (odd)

    def forward(self, x):
        assert len(x.shape) == 4, 'not correct feature map'
        # nearest-neighbour upsample: repeat each pixel `scale` times per axis
        x = x.repeat_interleave(self.scale, dim=-2)
        x = x.repeat_interleave(self.scale, dim=-1)
        # smooth the blocky result, as in StyleGAN's blurred upsampling
        x = gaussian_blur(x, self.blur_size)
        return x


class Style_block(nn.Module):
    """One StyleGAN synthesis block: upsample, then two (conv -> noise -> AdaIN) stages."""

    def __init__(self, latent_dim=64, kernel_size=3, leaky_alpha=0.2):
        super(Style_block, self).__init__()
        self.latent_dim = latent_dim
        self.upsample = Upsample()
        # each affine maps w to a (scale, bias) style pair, hence 2 * latent_dim
        self.style_ln1 = nn.Linear(latent_dim, 2 * latent_dim)
        self.style_ln2 = nn.Linear(latent_dim, 2 * latent_dim)
        self.conv1 = nn.Conv2d(latent_dim, latent_dim, kernel_size, padding=kernel_size // 2)
        self.conv2 = nn.Conv2d(latent_dim, latent_dim, kernel_size, padding=kernel_size // 2)
        self.leaky_relu = nn.LeakyReLU(leaky_alpha)
        # learned per-stage noise strengths (one per conv stage), zero-initialized
        self.noise_scale = nn.Parameter(torch.zeros(2))

    def forward(self, x, w, noise):
        """Apply the block.

        Args:
            x: input feature map (B, latent_dim, H, W).
            w: style code from the mapping network, shape (B, latent_dim).
            noise: sequence of at least two noise samples, one per stage.
        """
        x = self.upsample(x)

        y = self.style_ln1(w)
        ys, yb = y[:, :self.latent_dim], y[:, self.latent_dim:]
        x = self.leaky_relu(self.conv1(x))
        x = x + noise[0] * self.noise_scale[0]
        x = adaIN(x, [ys, yb])

        y = self.style_ln2(w)
        ys, yb = y[:, :self.latent_dim], y[:, self.latent_dim:]
        x = self.leaky_relu(self.conv2(x))
        # bug fix: the second stage must use its own noise sample and scale
        # (the original reused noise[0] * noise_scale[0])
        x = x + noise[1] * self.noise_scale[1]
        x = adaIN(x, [ys, yb])
        return x


class StyleGAN(nn.Module):
    """StyleGAN generator: mapping network + learned constant + style blocks.

    Starts from a learned ``start_shape`` constant and doubles the spatial
    size with each style block until ``output_shape`` is reached.
    """

    def __init__(
            self,
            latent_dim=64,
            kernel_size=3,
            num_stages=6,
            mapper_stages=8,
            start_shape=(4, 4),
            output_shape=(3, 128, 128),
            leaky_alpha=0.2,
    ):
        super().__init__()
        # num_stages - 1 style blocks, each doubling the spatial size
        assert start_shape[-1] * 2**(num_stages-1) == output_shape[-1]
        self.latent_dim = latent_dim
        self.output_shape = output_shape
        self.kernel_size = kernel_size
        # keep channel the same during up sampling
        c, h, w = output_shape
        self.start_shape = start_shape
        self.z_mapper = Z_mapper(latent_dim, mapper_stages, leaky_alpha)
        # learned constant input, shared across the batch
        self.start_constant = nn.Parameter(torch.randn((1, latent_dim) + start_shape))

        seq = []
        for i in range(1, num_stages):
            # TODO:decide the padding and the output padding automatically
            seq.append((f'style_block_{i}', Style_block(latent_dim, kernel_size, leaky_alpha)))

        self.style_blocks = nn.ModuleDict(OrderedDict(seq))
        # bug fix: respect the requested output channel count instead of hard-coding 3
        self.to_rgb = nn.Conv2d(latent_dim, c, 1)

    def forward(self, z):
        """Generate images of ``output_shape`` from latent codes ``z`` of shape (B, latent_dim)."""
        b, d = z.shape
        assert d == self.latent_dim, 'wrong z'
        w = self.z_mapper(z)
        img = einops.repeat(self.start_constant, '1 c h w -> b c h w', b=b)
        for name in self.style_blocks:
            # bug fix: allocate noise on the model's device/dtype so GPU/half runs work
            noise = torch.randn(2, device=img.device, dtype=img.dtype)
            img = self.style_blocks[name](img, w, noise)

        return self.to_rgb(img)

# TODO:we need a discriminator for styleGAN, maybe not the same as DCGAN will be better
class Discriminator(nn.Module):
    """DCGAN-style discriminator: strided-conv downsampling stack + linear head.

    Each stage halves the spatial size (stride 2) and doubles the channel
    count, finishing with a single-logit linear layer.
    """

    def __init__(
            self,
            kernel_size=5,
            stride=2,
            first_channel=64,
            num_stages=5,
            input_shape=(3, 128, 128),
    ):
        super().__init__()
        self.input_shape = input_shape
        c, h, w = input_shape
        self.kernel_size = kernel_size
        self.stride = stride
        # keep edge * channel the same during up sampling
        channels = [c] + [first_channel * 2 ** i for i in range(num_stages)]
        self.channels = channels
        self.last_shape = (channels[-1], h // 2 ** num_stages, w // 2 ** num_stages)
        # bug fix: derive the padding from kernel_size ('same'-style for odd
        # kernels) instead of hard-coding 2, which was only valid for kernel 5
        padding = kernel_size // 2
        seq = []
        for i in range(num_stages):
            seq.append((f'down_sample_{i}', nn.Conv2d(channels[i], channels[i + 1], kernel_size, stride, padding=padding)))
            seq.append((f'bn_{i}', nn.BatchNorm2d(channels[i + 1])))
            seq.append((f'leaky_relu_{i}', nn.LeakyReLU()))

        self.down_sampling_seq = nn.Sequential(OrderedDict(seq))
        self.ln = nn.Linear(self.last_shape[0] * self.last_shape[1] * self.last_shape[2], 1)

    def forward(self, img):
        """Return one realness logit per image; ``img`` has shape ``(B,) + input_shape``."""
        feat = self.down_sampling_seq(img)
        # flatten (C, H, W) into one feature vector per sample
        return self.ln(feat.flatten(1))


if __name__ == '__main__':
    # smoke test: sample a batch of latents and run the generator once
    latent_dim = 64
    stylegan = StyleGAN(latent_dim=latent_dim)
    sample_z = torch.randn((9, latent_dim))
    generated = stylegan(sample_z)
    print('generated:', generated.shape)

    # discriminator = Discriminator()
    # print(discriminator)
    # disc = discriminator(out)
    # print('discriminated:', disc.shape)
