from math import log2

from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch import nn, einsum
from einops import rearrange
from kornia.filters import filter2d
import math
    
def exists(val):
    """Return True when *val* carries a value (i.e. is not None)."""
    return val is not None

def image_noise(n, im_size, device):
    """Return an (n, 1, im_size, im_size) tensor of U[0, 1) noise on *device*.

    Fix: allocate directly on the target device instead of building a CPU
    ``FloatTensor`` and copying it over with ``.to(device)`` — avoids a
    host-side allocation plus a host-to-device transfer per call.
    """
    return torch.empty(n, 1, im_size, im_size, device=device).uniform_(0., 1.)

def leaky_relu(p=0.2):
    """Factory for an in-place LeakyReLU with negative slope *p*."""
    return nn.LeakyReLU(negative_slope=p, inplace=True)

class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        return torch.flatten(x, start_dim=1)

class Blur(nn.Module):
    """3x3 binomial blur applied via kornia's ``filter2d``.

    The separable [1, 2, 1] tap is stored as a 1-D buffer named ``f`` (so
    checkpoints stay compatible); its outer product forms the normalized
    2-D kernel at call time.
    """

    def __init__(self):
        super().__init__()
        taps = torch.Tensor([1, 2, 1])
        self.register_buffer('f', taps)

    def forward(self, x):
        taps = self.f
        kernel = taps[None, None, :] * taps[None, :, None]
        return filter2d(x, kernel, normalized=True)
    
    
class ConstantInput(nn.Module):
    """A learned constant feature map, tiled to the incoming batch size."""

    def __init__(self, channel, size=4):
        super().__init__()
        # One learnable (1, channel, size, size) map shared by every sample.
        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        # Only the batch dimension of *input* is used; its values are ignored.
        batch_size = input.shape[0]
        return self.input.repeat(batch_size, 1, 1, 1)

# attention
class Residual(nn.Module):
    """Wrap *fn* with an additive skip connection: x -> fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        out = self.fn(x)
        return out + x

class ChanNorm(nn.Module):
    """Channel-wise normalization with learnable per-channel scale and shift.

    Each spatial position is normalized across the channel dimension
    (dim=1) using the biased variance, then scaled by ``g`` and shifted
    by ``b``.
    """

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        mean = x.mean(dim=1, keepdim=True)
        var = x.var(dim=1, unbiased=False, keepdim=True)
        normed = (x - mean) / (var + self.eps).sqrt()
        return normed * self.g + self.b

class PreNorm(nn.Module):
    """Apply ChanNorm to the input before calling *fn* (pre-norm style)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = ChanNorm(dim)

    def forward(self, x):
        normed = self.norm(x)
        return self.fn(normed)

class DepthWiseConv2d(nn.Module):
    """Depthwise conv (groups == channels) followed by a 1x1 pointwise
    projection to *dim_out* channels."""

    def __init__(self, dim_in, dim_out, kernel_size, padding=0, stride=1, bias=True):
        super().__init__()
        depthwise = nn.Conv2d(
            dim_in, dim_in,
            kernel_size=kernel_size, padding=padding,
            groups=dim_in, stride=stride, bias=bias,
        )
        pointwise = nn.Conv2d(dim_in, dim_out, kernel_size=1, bias=bias)
        self.net = nn.Sequential(depthwise, pointwise)

    def forward(self, x):
        return self.net(x)

class LinearAttention(nn.Module):
    """Linear (efficiency-focused) attention over 2-D feature maps.

    Instead of the quadratic softmax(QK^T)V, it softmax-normalizes q and k
    separately, folds k and v into a small (d x e) context matrix, and
    applies that to q — cost is linear in the number of spatial positions
    N = x * y rather than O(N^2).
    """

    def __init__(self, dim, dim_head = 64, heads = 8):
        super().__init__()
        self.scale = dim_head ** -0.5  # 1/sqrt(dim_head) scaling applied to q
        self.heads = heads
        inner_dim = dim_head * heads

        self.nonlin = nn.GELU()
        # Queries from a 1x1 conv; keys and values from one shared depthwise
        # 3x3 conv whose output is split in two along channels.
        self.to_q = nn.Conv2d(dim, inner_dim, 1, bias = False)
        self.to_kv = DepthWiseConv2d(dim, inner_dim * 2, 3, padding = 1, bias = False)
        self.to_out = nn.Conv2d(inner_dim, dim, 1)

    def forward(self, fmap):
        # fmap: (b, dim, x, y) — rank fixed by the Conv2d projections below.
        h, x, y = self.heads, *fmap.shape[-2:]
        q, k, v = (self.to_q(fmap), *self.to_kv(fmap).chunk(2, dim = 1))
        # Fold heads into the batch dim: (b*h, x*y, dim_head).
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))

        # Linear-attention normalization: feature-wise softmax on q,
        # position-wise softmax on k.
        q = q.softmax(dim = -1)
        k = k.softmax(dim = -2)

        q = q * self.scale

        # context: (b*h, d, e) summary of k/v aggregated over all positions.
        context = einsum('b n d, b n e -> b d e', k, v)
        out = einsum('b n d, b d e -> b n e', q, context)
        # Restore the spatial layout and merge heads back into channels.
        out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)

        out = self.nonlin(out)
        return self.to_out(out)
    
def attn_and_ff(chan):
    """Build a residual pre-norm linear-attention stage followed by a
    residual pre-norm 1x1-conv feed-forward (chan -> 2*chan -> chan).

    Fix: converted from a lambda bound to a name (PEP 8 E731) to a proper
    ``def`` so the callable has a useful name in tracebacks and reprs;
    the redundant list-splat inside ``nn.Sequential`` is dropped.
    """
    return nn.Sequential(
        Residual(PreNorm(chan, LinearAttention(chan))),
        Residual(PreNorm(chan, nn.Sequential(
            nn.Conv2d(chan, chan * 2, 1),
            leaky_relu(),
            nn.Conv2d(chan * 2, chan, 1),
        ))),
    )

class GeneratorBlock(nn.Module):
    """Two 3x3 convolutions with leaky-ReLU activations, optionally
    preceded by a 2x bilinear upsample."""

    def __init__(self, input_channels, filters, upsample=True):
        super().__init__()
        if upsample:
            self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        else:
            self.upsample = None

        self.conv1 = nn.Conv2d(input_channels, filters, 3, 1, 1)
        self.conv2 = nn.Conv2d(filters, filters, 3, 1, 1)

        # Identical to the module-level leaky_relu() helper: slope 0.2, in-place.
        self.activation = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        if self.upsample is not None:
            x = self.upsample(x)

        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        return x


class Generator(nn.Module):
    """Generator: a 4x4 ConvTranspose stem followed by progressively
    upsampling ``GeneratorBlock``s, ending in a single-channel image.

    Args:
        image_size: output resolution (assumed a power of two).
        network_capacity: base channel multiplier.
        fmap_max: cap on per-layer channel counts.
        latent_dim: channel count of the (B, latent_dim, 1, 1) input noise.
    """

    def __init__(
            self,
            image_size,
            network_capacity=16,
            fmap_max=512,
            latent_dim=512,
    ):
        super().__init__()
        self.num_layers = int(log2(image_size) - 1)
        self.latent_dim = latent_dim

        # Widest layers first, each capped at fmap_max.
        filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]
        set_fmap_max = partial(min, fmap_max)
        filters = list(map(set_fmap_max, filters))
        init_channels = filters[0]

        filters = [init_channels, *filters]
        in_out_pairs = zip(filters[:-1], filters[1:])

        # Projects (B, latent_dim, 1, 1) noise to an init_channels x 4 x 4 map.
        self.initial_block = nn.ConvTranspose2d(latent_dim, init_channels, 4, 1, 0, bias=False)
        self.initial_conv = nn.Conv2d(filters[0], filters[0], 3, padding=1)

        # NOTE(review): conv1 is never used in forward(); kept so existing
        # checkpoints still load — confirm before removing.
        self.conv1 = nn.Conv2d(1, init_channels, 3, padding=1)

        self.blocks = nn.ModuleList([])

        for ind, (in_chan, out_chan) in enumerate(in_out_pairs):
            not_first = ind != 0

            block = GeneratorBlock(
                in_chan,
                out_chan,
                upsample=not_first,  # first block keeps the 4x4 stem resolution
            )
            self.blocks.append(block)

        # 1x1 conv mapping the final feature map to a single-channel image.
        self.to_mri = nn.Conv2d(
            in_channels=filters[-1],
            out_channels=1,
            kernel_size=1,
            stride=1,
            padding=0
        )

    def make_image_noise(self, x):
        """Sample U[0, 1) latent noise of shape (B, latent_dim, 1, 1) on x's
        device, where B is x's batch size (allocated device-side directly)."""
        batch_size = x.size(0)
        return torch.empty(batch_size, self.latent_dim, 1, 1, device=x.device).uniform_(0., 1.)

    def _initialize_weights(self):
        """DCGAN-style init: N(0, 0.02) conv weights; N(1, 0.02) weights and
        zero bias for batch norms.

        Bug fix: the previous version read ``self.modules().__class__.__name__``
        — the name of the *generator object* returned by ``modules()``, never
        'Conv' or 'BatchNorm' — so no weight was ever initialized (and ``m``
        was undefined had a branch been taken). It must iterate the modules.
        """
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif classname.find('BatchNorm') != -1:
                nn.init.normal_(m.weight.data, 1.0, 0.02)
                nn.init.constant_(m.bias.data, 0)

    def forward(self, input_noise):
        """Map (B, latent_dim, 1, 1) noise to a (B, 1, image_size, image_size) image."""
        x = self.initial_block(input_noise)
        x = self.initial_conv(x)

        for block in self.blocks:
            x = block(x)

        x = self.to_mri(x)
        return x


class DiscriminatorBlock(nn.Module):
    """Residual discriminator block: a 3x3 conv path plus a 1x1 skip,
    optionally downsampled 2x (blur then strided conv), summed and
    rescaled by 1/sqrt(2)."""

    def __init__(self, input_channels, filters, downsample=True):
        super().__init__()
        # 1x1 skip path; strided so it matches the main path's output size.
        skip_stride = 2 if downsample else 1
        self.conv_res = nn.Conv2d(input_channels, filters, 1, stride=skip_stride)

        self.net = nn.Sequential(
            nn.Conv2d(input_channels, filters, 3, padding=1),
            # Same as the module-level leaky_relu() helper: slope 0.2, in-place.
            nn.LeakyReLU(0.2, inplace=True),
        )

        if downsample:
            self.downsample = nn.Sequential(
                Blur(),
                nn.Conv2d(filters, filters, 3, padding=1, stride=2),
            )
        else:
            self.downsample = None

    def forward(self, x):
        shortcut = self.conv_res(x)
        out = self.net(x)
        if self.downsample is not None:
            out = self.downsample(out)
        # Scale the sum to keep activation variance roughly constant.
        return (out + shortcut) * (1 / math.sqrt(2))
    
    
class Discriminator(nn.Module):
    """Discriminator: a stack of downsampling ``DiscriminatorBlock``s with
    optional linear-attention stages, a final 3x3 conv, then a linear head
    producing one logit per image.

    Args:
        image_size: input resolution (assumed a power of two).
        network_capacity: base channel multiplier.
        attn_layers: 1-based block indices to follow with attention + FF.
        fmap_max: cap on per-layer channel counts.
        cls: number of input image channels.
    """

    def __init__(self,
                 image_size,
                 network_capacity = 16,
                 attn_layers = [],
                 fmap_max = 512,
                 cls=1):
        super().__init__()
        num_layers = int(log2(image_size) - 1)
        num_init_filters = cls

        # Fix: a duplicate, immediately-overwritten `blocks = []` was removed.
        filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)]

        set_fmap_max = partial(min, fmap_max)
        filters = list(map(set_fmap_max, filters))
        chan_in_out = list(zip(filters[:-1], filters[1:]))

        blocks = []
        attn_blocks = []

        for ind, (in_chan, out_chan) in enumerate(chan_in_out):
            num_layer = ind + 1
            is_not_last = ind != (len(chan_in_out) - 1)

            # The last block keeps its resolution so the head sees a 2x2 map.
            block = DiscriminatorBlock(in_chan, out_chan, downsample=is_not_last)
            blocks.append(block)

            # None when this depth is not listed in attn_layers.
            attn_fn = attn_and_ff(out_chan) if num_layer in attn_layers else None
            attn_blocks.append(attn_fn)

        self.blocks = nn.ModuleList(blocks)
        self.attn_blocks = nn.ModuleList(attn_blocks)
        # NOTE(review): always empty; kept for attribute/state-dict
        # compatibility with external code — confirm before removing.
        self.quantize_blocks = nn.ModuleList([])

        chan_last = filters[-1]
        latent_dim = 2 * 2 * chan_last  # final feature map is 2x2

        self.final_conv = nn.Conv2d(chan_last, chan_last, 3, padding=1)
        self.flatten = Flatten()
        self.to_logit = nn.Linear(latent_dim, 1)

    def _initialize_weights(self):
        """DCGAN-style init: N(0, 0.02) conv weights; N(1, 0.02) weights and
        zero bias for batch norms.

        Bug fix: the previous version read ``self.modules().__class__.__name__``
        — the name of the *generator object* returned by ``modules()``, never
        'Conv' or 'BatchNorm' — so no weight was ever initialized (and ``m``
        was undefined had a branch been taken). It must iterate the modules.
        """
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif classname.find('BatchNorm') != -1:
                nn.init.normal_(m.weight.data, 1.0, 0.02)
                nn.init.constant_(m.bias.data, 0)

    def forward(self, x):
        """Return a per-image logit tensor (squeezed) for input images x."""
        for (block, attn_block) in zip(self.blocks, self.attn_blocks):
            x = block(x)

            if attn_block is not None:
                x = attn_block(x)

        x = self.final_conv(x)
        x = self.flatten(x)
        x = self.to_logit(x)
        return x.squeeze()
    
    
if __name__ == "__main__":
    # Smoke test: build both networks at 256x256 and run one fake image
    # through the discriminator.
    generator = Generator(
        256,
        network_capacity=32,
        fmap_max=512,
        latent_dim=512,
    )
    generator._initialize_weights()

    real_image = torch.randn((1, 1, 256, 256))
    noise = generator.make_image_noise(real_image)
    fake_image = generator(noise)

    discriminator = Discriminator(256, attn_layers=[])
    discriminator._initialize_weights()
    out = discriminator(fake_image)

    print(real_image.size())
    print(fake_image.size())


