import torch.nn as nn
import torch


# Number of channels in the training images. For color images this is 3
NUM_CH = 3
# Size of z latent vector (i.e. size of generator input)
NUM_ZV = 100
# Input image size (the networks below are hard-wired to 64 x 64)
IMG_SIZE = 64


class Generator(nn.Module):
    """Conditional DCGAN generator.

    Takes a noise vector and a one-hot class label (both as 1x1 spatial
    maps), embeds each through its own transposed-conv stem, concatenates
    the two 256-channel feature maps, and upsamples to a NUM_CH x 64 x 64
    image in [-1, 1].

    NOTE: attribute names (image/label/main) and module ordering are part
    of the checkpoint format — state_dict keys depend on them.
    """

    def __init__(self, num_class):
        super(Generator, self).__init__()
        self.num_class = num_class

        def _stem(in_ch):
            # (in_ch) x 1 x 1 -> 256 x 4 x 4
            return nn.Sequential(
                nn.ConvTranspose2d(in_ch, 256, 4, 1, 0, bias=False),
                nn.BatchNorm2d(256),
                nn.ReLU(True),
            )

        # noise branch: (NUM_ZV) x 1 x 1 -> 256 x 4 x 4
        self.image = _stem(NUM_ZV)
        # label branch: (num_class) x 1 x 1 -> 256 x 4 x 4
        self.label = _stem(self.num_class)

        def _up(in_ch, out_ch):
            # transposed-conv block that doubles the spatial resolution
            return [
                nn.ConvTranspose2d(in_ch, out_ch, 4, 2, 1, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(True),
            ]

        self.main = nn.Sequential(
            # 512 x 4 x 4 -> 256 x 8 x 8
            *_up(512, 256),
            # 256 x 8 x 8 -> 128 x 16 x 16
            *_up(256, 128),
            # 128 x 16 x 16 -> 64 x 32 x 32
            *_up(128, 64),
            # 64 x 32 x 32 -> (NUM_CH) x 64 x 64, squashed to [-1, 1]
            nn.ConvTranspose2d(64, NUM_CH, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, image, label):
        """Map (noise, one-hot label) 1x1 maps to a generated image batch."""
        noise_feat = self.image(image)
        label_feat = self.label(label)
        return self.main(torch.cat((noise_feat, label_feat), dim=1))


class Discriminator(nn.Module):
    """Conditional DCGAN discriminator / critic.

    Takes a NUM_CH x 64 x 64 image and its class label expanded to a
    (num_class) x 64 x 64 one-hot map, embeds each through its own conv
    stem, concatenates the 64-channel feature maps, and downsamples to a
    single unbounded score per sample (no sigmoid — WGAN-style output).

    NOTE: attribute names (image/label/main) and module ordering are part
    of the checkpoint format — state_dict keys depend on them.
    """

    def __init__(self, num_class):
        super(Discriminator, self).__init__()
        self.num_class = num_class

        def _stem(in_ch):
            # (in_ch) x 64 x 64 -> 64 x 32 x 32
            return nn.Sequential(
                nn.Conv2d(in_ch, 64, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
            )

        # image branch
        self.image = _stem(NUM_CH)
        # label branch
        self.label = _stem(self.num_class)

        def _down(in_ch, out_ch):
            # conv block that halves the spatial resolution
            return [
                nn.Conv2d(in_ch, out_ch, 4, 2, 1, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.LeakyReLU(0.2, inplace=True),
            ]

        self.main = nn.Sequential(
            # 128 x 32 x 32 -> 256 x 16 x 16
            *_down(128, 256),
            # 256 x 16 x 16 -> 512 x 8 x 8
            *_down(256, 512),
            # 512 x 8 x 8 -> 512 x 4 x 4
            *_down(512, 512),
            # 512 x 4 x 4 -> 1 x 1 x 1
            nn.Conv2d(512, 1, 4, 1, 0, bias=False),
        )

    def forward(self, image, label):
        """Score a batch of (image, one-hot label map) pairs; shape (B, 1)."""
        img_feat = self.image(image)
        lbl_feat = self.label(label)
        score = self.main(torch.cat((img_feat, lbl_feat), dim=1))
        return torch.flatten(score, 1)


def weights_init(m: nn.Module):
    """DCGAN weight initialization, intended for use via `net.apply(weights_init)`.

    Conv/ConvTranspose weights are drawn from N(0, 0.02); BatchNorm scale
    from N(1, 0.02) with bias fixed at 0. Matching is by class-name
    substring, as in the original DCGAN reference code.
    """
    name = type(m).__name__
    if 'Conv' in name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


def cal_gp(real_imgs, fake_imgs, D_label, netD, device, lambda_gp=10):
    """WGAN-GP gradient penalty for a conditional critic.

    Interpolates between real and fake images with one random coefficient
    per sample, scores the interpolates with `netD(imgs, D_label)`, and
    penalizes the deviation of the per-sample gradient 2-norm from 1.

    Args:
        real_imgs, fake_imgs: (B, C, H, W) tensors on `device`.
        D_label: conditioning input forwarded to the critic unchanged.
        netD: critic callable `(imgs, label) -> scores`.
        device: device for the interpolation coefficients.
        lambda_gp: penalty weight (default 10, as in the WGAN-GP paper).

    Returns:
        Scalar tensor: lambda_gp * mean((||grad||_2 - 1)^2).
    """
    batch = real_imgs.shape[0]
    # one mixing coefficient per sample, broadcast over C/H/W
    r = torch.rand(size=(batch, 1, 1, 1)).to(device)
    gp_imgs = (r * real_imgs + (1 - r) * fake_imgs).requires_grad_(True)
    output_gp = netD(gp_imgs, D_label)
    grad0 = torch.autograd.grad(
        outputs=output_gp,
        inputs=gp_imgs,
        grad_outputs=torch.ones_like(output_gp).to(device),
        create_graph=True,   # penalty itself must be differentiable
        retain_graph=True
    )[0]
    # BUG FIX: the norm must be taken over ALL non-batch dimensions
    # (flatten each sample's gradient). The previous norm(2, dim=1)
    # reduced only the channel axis, penalizing every spatial location's
    # channel-norm toward 1 instead of the per-sample gradient norm.
    grad_norm = grad0.reshape(batch, -1).norm(2, dim=1)
    loss_gp = ((grad_norm - 1) ** 2).mean()
    return loss_gp * lambda_gp


# generate class wrap
class G_Net:
    def __init__(self, pth_path: str, num_cls: int) -> None:
        self.num_cls = num_cls

        # load net
        self.netG = Generator(num_cls)
        self.netG.load_state_dict(torch.load(
            pth_path, map_location=torch.device('cpu')))

        # set 1-hot-map and noise
        label_1hots = torch.zeros(num_cls, num_cls)
        for i in range(num_cls):
            label_1hots[i, i] = 1
        self.G_1hots = label_1hots.view(num_cls, num_cls, 1, 1)

    def generate(self, label: int, device, num: int = 1):
        # chekck valid label
        if label in range(self.num_cls):
            # transfer to one-hot tensor
            input_label = self.G_1hots[torch.full([num, ], label)].to(device)
            input_noise = torch.randn(num, NUM_ZV, 1, 1).to(device)
            with torch.no_grad():
                fake_imgs = self.netG(input_noise, input_label)
                return fake_imgs
        return None
