import copy
import math

import numpy as np
from munch import Munch

from paddle import fluid
import paddle

from utils.logger import get_logger
logger = get_logger(__name__)


def leaky_relu(inputs):
    """Apply LeakyReLU with the module-wide negative slope of 0.2."""
    activated = fluid.layers.leaky_relu(inputs, alpha=0.2)
    return activated

# variant: affine=False cannot be set in fluid's instance_norm
norm_index = 0
def instance_norm2d(inputs):
    """Parameter-free 2-D instance normalization.

    Requires a 4-D (NCHW) input; raises ValueError otherwise.
    """
    rank = len(inputs.shape)
    if rank != 4:
        raise ValueError('expected 4D input (got {}D input)'.format(rank))
    # Named per-instance weight/bias registration was tried and abandoned
    # (see InstanceNorm2d below); the parameter-free form is used instead.
    return fluid.layers.instance_norm(inputs)

def avg_pool2d(inputs, kernel_size):
    """Non-overlapping average pooling (stride equals the kernel size)."""
    return fluid.layers.pool2d(
        inputs,
        pool_size=kernel_size,
        pool_stride=kernel_size,
        pool_type='avg')

def relu(inputs):
    """Plain ReLU activation."""
    out = fluid.layers.relu(inputs)
    return out


class Discriminator(fluid.dygraph.Layer):
    """Multi-domain discriminator.

    Maps an image to one real/fake logit per domain, then returns, for each
    sample, the logit of that sample's target domain ``y``.
    """

    def __init__(self, img_size=256, num_domains=2, max_conv_dim=512):
        super(Discriminator, self).__init__()
        dim_in = 2**14 // img_size
        blocks = []
        blocks += [fluid.dygraph.Conv2D(3, dim_in, 3, 1, 1)]

        # Downsample until the feature map is 4x4 (assumes img_size >= 8).
        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out

        blocks += [leaky_relu]
        blocks += [fluid.dygraph.Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [leaky_relu]
        blocks += [fluid.dygraph.Conv2D(dim_out, num_domains, 1, 1, 0)]

        # Register sublayers explicitly (as Generator does): layers held only
        # in a plain Python list are not tracked by fluid.dygraph.Layer, so
        # their parameters would be invisible to .parameters() and never
        # updated by the optimizer.
        for i, blk in enumerate(blocks):
            if isinstance(blk, fluid.dygraph.Layer):
                self.add_sublayer(f'main.{i}', blk)
        self.layers = blocks
        # stand-in for torch's nn.Sequential(*blocks)
        self.main = self._main

    def _main(self, x):
        # Apply the stacked blocks sequentially.
        for layer in self.layers:
            x = layer(x)
        return x

    def forward(self, x, y):
        """x: (batch, 3, H, W) images; y: (batch,) integer domain labels.

        Returns a (batch,) tensor of logits, one per sample for its own domain.
        """
        out = self.main(x)
        out = fluid.layers.reshape(out, (out.shape[0], -1))  # (batch, num_domains)
        logger.debug(f'out: {out.shape}')

        # Gather out[i, y[i]] for each sample. The former `np.int` alias was
        # removed in NumPy 1.24, so plain Python ints are used instead; the
        # old intermediate `idx` tensor was just range(batch) and is dropped.
        s = []
        for i in range(y.shape[0]):
            label = int(y[i].numpy().flatten()[0])
            s += [out[i, label]]
        s = fluid.layers.stack(s)
        s = fluid.layers.reshape(s, (s.shape[0], ))
        return s


class StyleEncoder(fluid.dygraph.Layer):
    """Encodes an image into a style code for the domain given by ``y``.

    A shared conv trunk produces a feature vector; one per-domain Linear head
    maps it to a style code, and the head matching ``y`` is selected.
    """

    def __init__(self, img_size=256, style_dim=64, num_domains=2, max_conv_dim=512):
        super(StyleEncoder, self).__init__()

        dim_in = 2**14 // img_size
        blocks = []
        blocks += [fluid.dygraph.Conv2D(3, dim_in, 3, 1, 1)]

        # Downsample until the feature map is 4x4 (assumes img_size >= 8).
        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out

        blocks += [leaky_relu]
        blocks += [fluid.dygraph.Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [leaky_relu]

        # Register sublayers explicitly (as Generator does): layers held only
        # in a plain Python list are not tracked by fluid.dygraph.Layer, so
        # their parameters would be invisible to .parameters().
        for i, blk in enumerate(blocks):
            if isinstance(blk, fluid.dygraph.Layer):
                self.add_sublayer(f'shared.{i}', blk)
        # stand-in for torch's self.shared = nn.Sequential(*blocks)
        self.layers = blocks
        self.shared = self._shared

        # One Linear head per domain; each must be registered for the same reason.
        self.unshared = []
        for d in range(num_domains):
            head = fluid.dygraph.Linear(dim_out, style_dim)
            self.add_sublayer(f'unshared.{d}', head)
            self.unshared += [head]

    def _shared(self, x):
        # Apply the shared trunk sequentially.
        for layer in self.layers:
            x = layer(x)
        return x

    def forward(self, x, y):
        """x: (batch, 3, H, W) images; y: (batch,) integer domain labels.

        Returns a (batch, style_dim) tensor of style codes.
        """
        h = self.shared(x)
        h = fluid.layers.reshape(h, (h.shape[0], -1))
        out = []
        for layer in self.unshared:
            out += [layer(h)]
        out = fluid.layers.stack(out, axis=1)   # (batch, num_domains, style_dim)

        # Gather out[i, y[i]]. The former `np.int` alias was removed in
        # NumPy 1.24, so plain Python ints are used instead; the old
        # intermediate `idx` tensor was just range(batch) and is dropped.
        s = []
        for i in range(y.shape[0]):
            label = int(y[i].numpy().flatten()[0])
            s += [out[i, label]]
        s = fluid.layers.stack(s)
        return s


class MappingNetwork(fluid.dygraph.Layer):
    """Maps a latent code ``z`` to a style code for the domain given by ``y``.

    A shared MLP trunk feeds one per-domain MLP branch; the branch matching
    ``y`` provides each sample's style code.
    """

    def __init__(self, latent_dim=16, style_dim=64, num_domains=2):
        super(MappingNetwork, self).__init__()
        self.layers = []
        self.layers += [fluid.dygraph.Linear(latent_dim, 512)]
        self.layers += [relu]
        for _ in range(3):
            self.layers += [fluid.dygraph.Linear(512, 512)]
            self.layers += [relu]

        # Register sublayers explicitly (as Generator does): layers held only
        # in Python lists/tuples are not tracked by fluid.dygraph.Layer, so
        # their parameters would be invisible to .parameters().
        for i, blk in enumerate(self.layers):
            if isinstance(blk, fluid.dygraph.Layer):
                self.add_sublayer(f'shared.{i}', blk)
        # stand-in for torch's self.shared = nn.Sequential(*layers)
        self.shared = self._shared

        # One MLP branch per domain (stand-in for nn.ModuleList of nn.Sequential).
        self.unshared = []
        for d in range(num_domains):
            branch = (fluid.dygraph.Linear(512, 512),
                      relu,
                      fluid.dygraph.Linear(512, 512),
                      relu,
                      fluid.dygraph.Linear(512, 512),
                      relu,
                      fluid.dygraph.Linear(512, style_dim))
            for j, blk in enumerate(branch):
                if isinstance(blk, fluid.dygraph.Layer):
                    self.add_sublayer(f'unshared.{d}.{j}', blk)
            self.unshared += [branch]
        logger.debug(f'unshared: {len(self.unshared)}')

    def _shared(self, x):
        # Apply the shared trunk sequentially.
        for layer in self.layers:
            x = layer(x)
        return x

    def forwad_layers(self, layers, x):
        # NOTE: misspelled name kept for backward compatibility with existing
        # callers; prefer the `forward_layers` alias below.
        for layer in layers:
            x = layer(x)
        return x

    # correctly spelled alias for forwad_layers
    forward_layers = forwad_layers

    def forward(self, z, y):
        """z: (batch, latent_dim) latent codes; y: (batch,) integer domain labels.

        Returns a (batch, style_dim) tensor of style codes.
        """
        h = self.shared(z)
        out = []
        for branch in self.unshared:
            out += [self.forwad_layers(branch, h)]

        out = fluid.layers.stack(out, axis=1)  # (batch, num_domains, style_dim)

        # Gather out[i, y[i]] (torch: out[idx, y]). The former `np.int` alias
        # was removed in NumPy 1.24, so plain Python ints are used instead;
        # the old intermediate `idx` tensor was just range(batch) and is dropped.
        s = []
        for i in range(y.shape[0]):
            label = int(y[i].numpy().flatten()[0])
            s += [out[i, label]]
        s = fluid.layers.stack(s)
        return s


class ResBlk(fluid.dygraph.Layer):
    """Pre-activation residual block.

    Optional parameter-free instance normalization, optional 2x average-pool
    downsampling, and a learned 1x1 shortcut when in/out channels differ.
    The output is scaled by 1/sqrt(2) to keep unit variance after addition.
    """

    def __init__(self, dim_in, dim_out, actv=leaky_relu,
                 normalize=False, downsample=False):
        super(ResBlk, self).__init__()
        self.actv = actv
        self.normalize = normalize
        self.downsample = downsample
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out)

    def _build_weights(self, dim_in, dim_out):
        # Two 3x3 convs; the channel change happens in conv2.
        self.conv1 = fluid.dygraph.Conv2D(dim_in, dim_in, 3, 1, 1)
        self.conv2 = fluid.dygraph.Conv2D(dim_in, dim_out, 3, 1, 1)
        if self.normalize:
            # parameter-free instance norm (fluid has no affine=False switch)
            self.norm1 = instance_norm2d
            self.norm2 = instance_norm2d
        if self.learned_sc:
            # 1x1 projection for the shortcut (bias cannot be disabled in fluid)
            self.conv1x1 = fluid.dygraph.Conv2D(dim_in, dim_out, 1, 1, 0)

    def _shortcut(self, x):
        # Project channels if needed, then downsample if needed.
        out = self.conv1x1(x) if self.learned_sc else x
        return avg_pool2d(out, 2) if self.downsample else out

    def _residual(self, x):
        # norm -> act -> conv -> (pool) -> norm -> act -> conv
        h = self.norm1(x) if self.normalize else x
        h = self.conv1(self.actv(h))
        if self.downsample:
            h = avg_pool2d(h, 2)
        if self.normalize:
            h = self.norm2(h)
        return self.conv2(self.actv(h))

    def forward(self, x):
        # unit variance after the skip addition
        return (self._shortcut(x) + self._residual(x)) / math.sqrt(2)


class AdaIN(fluid.dygraph.Layer):
    """Adaptive instance normalization.

    Normalizes ``x`` with parameter-free instance norm, then re-modulates it
    with a per-sample scale (gamma) and shift (beta) predicted from the style
    code ``s`` by a single Linear layer.
    """

    def __init__(self, style_dim, num_features):
        super(AdaIN, self).__init__()
        self.norm = instance_norm2d
        # predicts gamma and beta stacked along the channel axis
        self.fc = fluid.dygraph.Linear(style_dim, num_features*2)

    def forward(self, x, s):
        stats = self.fc(s)
        # (batch, 2*num_features) -> (batch, 2*num_features, 1, 1)
        stats = fluid.layers.reshape(stats, (stats.shape[0], stats.shape[1], 1, 1))

        # Split channels into gamma (first half) and beta (second half).
        # The unstack/concat round-trip works around fluid lacking a direct
        # two-way split via unstack along the channel axis.
        channels = fluid.layers.unstack(stats, axis=1)
        half = int(stats.shape[1] / 2)
        gamma = fluid.layers.concat(channels[:half], axis=1)
        beta = fluid.layers.concat(channels[half:], axis=1)
        gamma = fluid.layers.reshape(gamma, (gamma.shape[0], gamma.shape[1], 1, 1))
        beta = fluid.layers.reshape(beta, (beta.shape[0], beta.shape[1], 1, 1))

        # scale-and-shift around the normalized input
        return (1 + gamma) * self.norm(x) + beta


class AdainResBlk(fluid.dygraph.Layer):
    """Residual block with style-conditioned (AdaIN) normalization.

    Optionally upsamples 2x with nearest-neighbor resizing. When ``w_hpf`` is
    non-zero the skip connection is handled by the caller, so only the
    residual path is returned.
    """

    def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0,
                 actv=leaky_relu, upsample=False):
        super(AdainResBlk, self).__init__()
        self.w_hpf = w_hpf
        self.actv = actv
        self.upsample = upsample
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out, style_dim)

    def _build_weights(self, dim_in, dim_out, style_dim=64):
        self.conv1 = fluid.dygraph.Conv2D(dim_in, dim_out, 3, 1, 1)
        self.conv2 = fluid.dygraph.Conv2D(dim_out, dim_out, 3, 1, 1)
        self.norm1 = AdaIN(style_dim, dim_in)
        self.norm2 = AdaIN(style_dim, dim_out)
        if self.learned_sc:
            # 1x1 projection shortcut (bias cannot be disabled in fluid)
            self.conv1x1 = fluid.dygraph.Conv2D(dim_in, dim_out, 1, 1, 0)

    def interpolate(self, x, scale_factor, mode):
        # equivalent of torch F.interpolate(x, scale_factor=..., mode='nearest')
        return fluid.layers.image_resize(x, scale=scale_factor, resample=mode)

    def _shortcut(self, x):
        out = x
        if self.upsample:
            out = self.interpolate(out, scale_factor=2., mode='NEAREST')
        if self.learned_sc:
            out = self.conv1x1(out)
        return out

    def _residual(self, x, s):
        h = self.actv(self.norm1(x, s))
        if self.upsample:
            h = self.interpolate(h, scale_factor=2., mode='NEAREST')
        h = self.conv1(h)
        h = self.actv(self.norm2(h, s))
        return self.conv2(h)

    def forward(self, x, s):
        out = self._residual(x, s)
        if self.w_hpf == 0:
            # unit variance after the skip addition
            out = (out + self._shortcut(x)) / math.sqrt(2)
        return out


class InstanceNorm2d(fluid.dygraph.Layer):
    """Instance normalization with learnable per-channel scale and shift.

    NOTE(review): unused in this file — ResBlk/AdaIN use the parameter-free
    ``instance_norm2d`` helper instead (see the commented-out variants there).
    Passing the created Parameter objects directly as ``param_attr``/
    ``bias_attr`` may not be accepted by all fluid versions, which usually
    expect ``fluid.ParamAttr`` — confirm before enabling this class.
    """

    def __init__(self, in_dim):
        super(InstanceNorm2d, self).__init__()
        # per-channel scale initialized to 1.0, shift initialized to 0.0
        self.weight = fluid.layers.create_parameter(shape=[in_dim], is_bias=False, dtype='float32', default_initializer=fluid.initializer.Constant(value=1.0))
        self.bias = fluid.layers.create_parameter(shape=[in_dim], is_bias=True, dtype='float32', default_initializer=fluid.initializer.Constant(value=0.0))

    def forward(self, x):
        # Apply instance norm with the learned affine parameters.
        return fluid.layers.instance_norm(x, param_attr=self.weight, bias_attr=self.bias)


class LeakyReLU(fluid.dygraph.Layer):
    """Layer wrapper around leaky_relu with a configurable negative slope.

    NOTE(review): the default alpha (0.02, fluid's own default) differs from
    the module-level ``leaky_relu`` helper which uses 0.2 — confirm this is
    intentional. The only visible caller (Generator.to_rgb) passes 0.2.
    """

    def __init__(self, alpha=0.02):
        super(LeakyReLU, self).__init__()
        self.alpha = alpha

    def forward(self, x):
        out = fluid.layers.leaky_relu(x, alpha=self.alpha)
        return out

class Generator(fluid.dygraph.Layer):
    """StarGAN-v2-style generator.

    Encodes an image with downsampling ResBlks, then decodes it with
    style-conditioned AdainResBlks (mirrored order) and projects back to RGB.
    """

    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        super(Generator, self).__init__()
        dim_in = 2**14 // img_size
        self.img_size = img_size
        self.from_rgb = fluid.dygraph.Conv2D(3, dim_in, 3, stride=1, padding=1)
        # fixed: `self.encode = []` was duplicated in the original
        self.encode = []
        self.decode = []
        self.to_rgb = fluid.dygraph.Sequential(
            ('in2d', fluid.dygraph.InstanceNorm(dim_in)),
            ('leaky_relu', LeakyReLU(0.2)),
            ('conv2d', fluid.dygraph.Conv2D(dim_in, 3, 1, 1, 0))
        )

        # down/up-sampling blocks; add_sublayer registers them so their
        # parameters are tracked (plain lists are not tracked by fluid).
        repeat_num = int(np.log2(img_size)) - 4
        if w_hpf > 0:
            repeat_num += 1
        layer_index = 0
        for _ in range(repeat_num):
            dim_out = min(dim_in*2, max_conv_dim)
            self.encode.append(
                self.add_sublayer(f'encode.{layer_index}', ResBlk(dim_in, dim_out, normalize=True, downsample=True)))
            # decoder mirrors the encoder, so new blocks go to the front (stack-like)
            self.decode.insert(
                0, self.add_sublayer(f'decode.{layer_index}', AdainResBlk(dim_out, dim_in, style_dim,
                               w_hpf=w_hpf, upsample=True)))
            dim_in = dim_out
            layer_index += 1

        # bottleneck blocks (no resolution change)
        for _ in range(2):
            self.encode.append(
                self.add_sublayer(f'encode.{layer_index}', ResBlk(dim_out, dim_out, normalize=True)))
            self.decode.insert(
                0, self.add_sublayer(f'decode.{layer_index}', AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf)))
            layer_index += 1

        # NOTE(review): the torch original builds a HighPass module here when
        # w_hpf > 0; it has not been ported, so forward() with masks != None
        # would fail on the undefined self.hpf — port HighPass before using masks.

    def _to_rgb(self, inputs):
        # NOTE(review): unused leftover variant (see commented-out code in
        # __init__); kept for reference. It would apply norm+activation twice,
        # since self.to_rgb already contains them.
        x = instance_norm2d(inputs)
        x = leaky_relu(x)
        x = self.to_rgb(x)
        return x

    def forward(self, x, s, masks=None):
        """x: (batch, 3, H, W) images; s: style codes; masks: optional pair of
        high-pass masks. Returns the generated (batch, 3, H, W) image."""
        x = self.from_rgb(x)
        cache = {}
        for block in self.encode:
            # fixed: original used torch's x.size(2); fluid tensors use .shape
            if (masks is not None) and (x.shape[2] in [32, 64, 128]):
                cache[x.shape[2]] = x
            x = block(x)
        for block in self.decode:
            x = block(x, s)
            if (masks is not None) and (x.shape[2] in [32, 64, 128]):
                mask = masks[0] if x.shape[2] in [32] else masks[1]
                # equivalent of torch F.interpolate(mask, size=..., mode='bilinear')
                mask = fluid.layers.image_resize(mask, out_shape=x.shape[2], resample='BILINEAR')
                # NOTE(review): self.hpf is never defined (see __init__)
                x = x + self.hpf(mask * cache[x.shape[2]])
        return self.to_rgb(x)



def build_model(args):
    """Build all StarGAN-v2 sub-networks plus EMA copies.

    The EMA set omits the discriminator (not needed at inference time). EMA
    networks are constructed fresh rather than deep-copied (see the variant
    comment in the original torch code); weight syncing is left to the caller.
    """
    generator = Generator(args.img_size, args.style_dim, w_hpf=args.w_hpf)
    mapping_network = MappingNetwork(args.latent_dim, args.style_dim, args.num_domains)
    style_encoder = StyleEncoder(args.img_size, args.style_dim, args.num_domains)
    discriminator = Discriminator(args.img_size, args.num_domains)
    logger.debug(f'generator: {generator}')

    nets = Munch(
        generator=generator,
        mapping_network=mapping_network,
        style_encoder=style_encoder,
        discriminator=discriminator)

    # variant: generator_ema = copy.deepcopy(generator)
    nets_ema = Munch(
        generator=Generator(args.img_size, args.style_dim, w_hpf=args.w_hpf),
        mapping_network=MappingNetwork(args.latent_dim, args.style_dim, args.num_domains),
        style_encoder=StyleEncoder(args.img_size, args.style_dim, args.num_domains))

    return nets, nets_ema



