import math

import torch.nn as nn
from einops.layers.torch import Rearrange
from einops import pack, unpack, rearrange

from sgm.modules.attention import Attention


def cast_tuple(val, l=1):
    """Return `val` unchanged if it is already a tuple; otherwise repeat it `l` times as a tuple."""
    if isinstance(val, tuple):
        return val
    return (val,) * l


class DiscriminatorBlock(nn.Module):
    """Residual convolutional block for the discriminator.

    Main path: two kxk convs with LeakyReLU, optionally followed by a 2x
    space-to-depth downsampler. Skip path: a 1x1 conv (stride 2 when
    downsampling, so the spatial shapes match). The residual sum is scaled
    by 1/sqrt(2) to keep activation variance roughly constant across blocks.
    """

    def __init__(self, input_channels, output_channels, kernel_size=3, padding=1, down_sample=True):
        super().__init__()
        # Skip connection: 1x1 conv; stride 2 mirrors the main path's 2x downsampling.
        self.res = nn.Conv2d(in_channels=input_channels, out_channels=output_channels,
                             kernel_size=1, stride=2 if down_sample else 1)
        self.net = nn.Sequential(
            nn.Conv2d(in_channels=input_channels, out_channels=output_channels,
                      kernel_size=kernel_size, padding=padding),
            nn.LeakyReLU(),
            nn.Conv2d(in_channels=output_channels, out_channels=output_channels,
                      kernel_size=kernel_size, padding=padding),
            nn.LeakyReLU()
        )
        self.down_sample = down_sample
        # 2x downsampling via space-to-depth (2x2 patches -> channels) plus a
        # 1x1 conv to restore the channel count. Requires even H and W.
        self.down_sampler = nn.Sequential(
            Rearrange('b c (h ph) (w pw) -> b (c ph pw) h w', ph=2, pw=2),
            nn.Conv2d(in_channels=output_channels * 4, out_channels=output_channels, kernel_size=1)
        ) if down_sample else None

    def forward(self, x):
        res = self.res(x)
        x = self.net(x)

        if self.down_sample:
            x = self.down_sampler(x)

        # BUGFIX: scale the residual sum DOWN by sqrt(2) (variance-preserving,
        # as in StyleGAN2-style residual discriminators). The original divided
        # by 1/sqrt(2), which multiplied the sum by sqrt(2) instead.
        x = (x + res) * (1 / math.sqrt(2))
        return x


class Discriminator(nn.Module):
    """Convolutional image discriminator producing one real/fake logit per image.

    A stack of DiscriminatorBlocks progressively halves resolution (every
    block except the last downsamples 2x); at the spatial resolutions listed
    in `attn_resolution_layers`, a residual self-attention layer over the
    flattened spatial positions follows the conv block. A conv + linear head
    flattens the final feature map into a scalar score per sample.
    """

    def __init__(self, dim, image_size, channels=3, kernel_size=3, padding=1, attn_resolution_layers=(16,),
                 max_dim=512):
        """
        Args:
            dim: base width; layer widths grow as (dim * 4) * 2**i, capped at max_dim.
            image_size: (height, width) of the input images.
            channels: number of input image channels.
            kernel_size: kernel size for the classifier-head conv.
            padding: padding for the classifier-head conv.
            attn_resolution_layers: resolutions at which to insert attention,
                or a single value broadcast across all layers.
            max_dim: upper bound on any layer's channel width.
        """
        super().__init__()

        min_img_resolution = min(image_size)
        img_resolution = min_img_resolution
        # Halve the smaller side until it reaches 4: log2(res) - 2 downsamples.
        num_layers = int(math.log2(min_img_resolution) - 2)
        attn_resolution_layers = cast_tuple(attn_resolution_layers, num_layers)

        layer_dims = [channels] + [(dim * 4) * (2 ** i) for i in range(num_layers + 1)]
        layer_dims = [min(layer_dim, max_dim) for layer_dim in layer_dims]
        layer_dims_in_out = tuple(zip(layer_dims[:-1], layer_dims[1:]))

        con_blocks, attn_blocks = [], []

        for index, (in_channel, out_channel) in enumerate(layer_dims_in_out):
            is_not_last = index != (len(layer_dims_in_out) - 1)

            conv_block = DiscriminatorBlock(in_channel, out_channel, down_sample=is_not_last)
            con_blocks.append(conv_block)

            # Attention only at the requested resolutions; None placeholders
            # keep the conv and attention lists index-aligned for forward().
            attn_block = None
            if img_resolution in attn_resolution_layers:
                attn_block = Attention(dim=out_channel)

            attn_blocks.append(attn_block)
            img_resolution //= 2

        self.conv_blocks = nn.ModuleList(con_blocks)
        self.attn_blocks = nn.ModuleList(attn_blocks)

        last_dim = layer_dims[-1]
        down_sample_factor = 2 ** num_layers
        last_fmap_size = tuple(map(lambda n: n // down_sample_factor, image_size))
        last_feature_dim = last_fmap_size[0] * last_fmap_size[1] * last_dim

        self.cls = nn.Sequential(
            nn.Conv2d(last_dim, last_dim, kernel_size=kernel_size, padding=padding),
            nn.LeakyReLU(),
            Rearrange('b ... -> b (...)'),
            nn.Linear(last_feature_dim, 1),
            Rearrange('b 1 -> b')
        )

    def forward(self, x):
        """Score a batch of images; returns a 1-D tensor of logits, one per sample."""
        for conv_block, attn_block in zip(self.conv_blocks, self.attn_blocks):
            x = conv_block(x)

            if attn_block is not None:
                # Flatten spatial dims into a token axis, run residual
                # self-attention over (b, n, c), then restore (b, c, h, w).
                x, packed_shape = pack([x], 'b c *')
                x = rearrange(x, 'b c n -> b n c')
                x = attn_block(x) + x
                x = rearrange(x, 'b n c -> b c n')
                # BUGFIX: einops.unpack returns a LIST of tensors; the original
                # `x = unpack(...)` bound the list to x, so the next
                # conv_block(x) / self.cls(x) call received a list and failed.
                x, = unpack(x, packed_shape, 'b c *')

        return self.cls(x)
