import torch


import torch.nn as nn
import torch.nn.functional as F

from models.component.layer import SPADEResnetBlock, ResBlock, ConvBlock
from models.component.normlization import get_nonspade_norm_layer
from models.networks.basenetwork import BaseNetwork
from option import Options
import numpy as np


class ConvEncoder(BaseNetwork):
    """VAE-style image encoder.

    Maps a 3-channel image (resized to 256x256 if needed) to the mean and
    log-variance of a 256-dimensional latent Gaussian.
    """

    def __init__(self):
        super(ConvEncoder, self).__init__()
        kernel = 3
        pad = int(np.ceil((kernel - 1.0) / 2))
        base = 64
        norm_layer = get_nonspade_norm_layer()
        # Five stride-2 convolutions halve the resolution each time:
        # 256 -> 128 -> 64 -> 32 -> 16 -> 8.
        self.layer1 = norm_layer(nn.Conv2d(3, base, kernel, stride=2, padding=pad))
        self.layer2 = norm_layer(nn.Conv2d(base * 1, base * 2, kernel, stride=2, padding=pad))
        self.layer3 = norm_layer(nn.Conv2d(base * 2, base * 4, kernel, stride=2, padding=pad))
        self.layer4 = norm_layer(nn.Conv2d(base * 4, base * 8, kernel, stride=2, padding=pad))
        self.layer5 = norm_layer(nn.Conv2d(base * 8, base * 8, kernel, stride=2, padding=pad))
        # Spatial side of the final feature map (256 / 2**5 == 8).
        self.so = s0 = 8
        self.fc_mu = nn.Linear(base * 8 * s0 * s0, 256)
        self.fc_var = nn.Linear(base * 8 * s0 * s0, 256)
        self.actvn = nn.LeakyReLU(0.2, False)

    def forward(self, x):
        """Return (mu, logvar) for a batch of images of shape (N, 3, H, W)."""
        # The fully-connected heads assume an 8x8 final map, so force 256x256.
        if x.size(2) != 256 or x.size(3) != 256:
            x = F.interpolate(x, size=(256, 256), mode='bilinear')
        x = self.layer1(x)
        for conv in (self.layer2, self.layer3, self.layer4, self.layer5):
            x = conv(self.actvn(x))
        flat = self.actvn(x).view(x.size(0), -1)
        return self.fc_mu(flat), self.fc_var(flat)


class ResBlockNet(nn.Module):
    """Stack of four residual blocks.

    Only the first block changes the channel count (in_channels ->
    out_channels); the remaining three keep out_channels.
    """

    def __init__(self, in_channels, out_channels):
        super(ResBlockNet, self).__init__()
        blocks = [ResBlock(in_channels, out_channels)]
        blocks.extend(ResBlock(out_channels, out_channels) for _ in range(3))
        self.main = nn.Sequential(*blocks)

    def forward(self, x):
        """Apply the four residual blocks in sequence."""
        return self.main(x)

class SPADEGenerator(BaseNetwork):
    """SPADE generator.

    Decodes a latent vector ``z`` into an RGB image, conditioned at every
    resolution on a 1-channel map (``label_nc=1``) through SPADE residual
    blocks. The output is ``tanh``-bounded to [-1, 1].
    """

    def __init__(self, opt):
        super(SPADEGenerator, self).__init__()
        self.opt = opt
        self.ngf = opt.ngf
        # Spatial size of the initial feature map: image_size / 2**num_up_layers.
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        # Project z to the initial 16*ngf x sh x sw feature map.
        self.fc = nn.Linear(opt.z_dim, 16 * self.ngf * self.sw * self.sh)
        self.head_0 = SPADEResnetBlock(16 * self.ngf, 16 * self.ngf, label_nc=1)
        self.G_middle_0 = SPADEResnetBlock(16 * self.ngf, 16 * self.ngf, label_nc=1)
        self.G_middle_1 = SPADEResnetBlock(16 * self.ngf, 16 * self.ngf, label_nc=1)
        self.up_0 = SPADEResnetBlock(16 * self.ngf, 8 * self.ngf, label_nc=1)
        self.up_1 = SPADEResnetBlock(8 * self.ngf, 4 * self.ngf, label_nc=1)
        self.up_2 = SPADEResnetBlock(4 * self.ngf, 2 * self.ngf, label_nc=1)
        self.up_3 = SPADEResnetBlock(2 * self.ngf, 1 * self.ngf, label_nc=1)
        self.conv_img = nn.Conv2d(self.ngf, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def forward(self, input, z):
        """Generate an image from ``z`` conditioned on ``input``.

        Args:
            input: conditioning map, fed to every SPADE block
                (assumed (N, 1, H, W) since label_nc=1 — TODO confirm).
            z: latent codes (N, z_dim); sampled from N(0, I) on
                ``opt.device`` when None.

        Returns:
            Generated images in [-1, 1] of shape (N, 3, image_size, image_size).
        """
        seg = input
        if z is None:
            z = torch.randn(input.size(0), self.opt.z_dim,
                            dtype=torch.float32, device=self.opt.device)
        x = self.fc(z)
        # Use self.ngf consistently (it is opt.ngf, cached in __init__).
        x = x.view(-1, 16 * self.ngf, self.sh, self.sw)

        # Five x2 upsamplings bring (sh, sw) back to the full image size,
        # matching num_up_layers in compute_latent_vector_size().
        x = self.head_0(x, seg)
        x = self.up(x)

        x = self.G_middle_0(x, seg)
        x = self.G_middle_1(x, seg)
        x = self.up(x)

        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        x = self.up(x)
        x = self.up_3(x, seg)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        return torch.tanh(x)

    def compute_latent_vector_size(self, opt):
        """Return (sw, sh), the spatial size of the initial latent feature map.

        Must agree with the number of ``self.up`` calls in ``forward``.
        """
        num_up_layers = 5
        sw = opt.image_size // (2 ** num_up_layers)
        sh = sw  # square output; `round(sw / 1.0)` was a no-op
        return sw, sh

if __name__ == '__main__':
    # Smoke test: encode a random batch and print the latent mean's shape.
    # (Expected: torch.Size([2, 256]).)
    conv = ConvEncoder()
    bs = 2
    ref = torch.randn(size=(bs, 3, 256, 256))
    mu, logvar = conv(ref)
    print(mu.shape)
