#  Copyright (c) 2017-2020 Apache 2.0.
#  Author: Xiaozhong Ji
#  Update: 2020 - 5 - 28

import torch
import torch.nn as nn
import torchvision


class NLayerDiscriminator(nn.Module):
  """Defines a PatchGAN discriminator"""

  def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
    """Construct a PatchGAN discriminator

    Parameters:
        input_nc (int)  -- the number of channels in input images
        ndf (int)       -- the number of filters in the last conv layer
        n_layers (int)  -- the number of conv layers in the discriminator
        norm_layer      -- normalization layer
    """
    super(NLayerDiscriminator, self).__init__()
    # A conv bias is redundant right before BatchNorm (it is absorbed by the
    # norm's own learnable shift), so enable it only for InstanceNorm.  With
    # the default norm_layer this evaluates to False, identical to the
    # previously hard-coded value.
    use_bias = norm_layer == nn.InstanceNorm2d
    kw = 4    # kernel size of every conv in the stack
    padw = 1  # padding of every conv in the stack
    # First layer: no normalization on the raw input image.
    sequence = [
      nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
      nn.LeakyReLU(0.2, True)]
    nf_mult = 1
    for n in range(1, n_layers):  # gradually increase the number of filters
      nf_mult_prev = nf_mult
      nf_mult = min(2 ** n, 8)  # cap the width at ndf * 8
      sequence += [
        nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,
                  padding=padw, bias=use_bias),
        norm_layer(ndf * nf_mult),
        nn.LeakyReLU(0.2, True)
      ]

    # One more stride-1 block before the prediction layer.
    nf_mult_prev = nf_mult
    nf_mult = min(2 ** n_layers, 8)
    sequence += [
      nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,
                padding=padw, bias=use_bias),
      norm_layer(ndf * nf_mult),
      nn.LeakyReLU(0.2, True)
    ]

    sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1,
                           padding=padw)]  # output 1 channel prediction map
    self.model = nn.Sequential(*sequence)

  def forward(self, x):
    """Standard forward; returns an [N, 1, H', W'] patch score map."""
    return self.model(x)


class Discriminator_VGG_128(nn.Module):
  """VGG-style discriminator head for 128x128 inputs.

  Five stride-2 stages shrink the input to a 4x4 feature map; the hard-coded
  `linear1` input size (512 * 4 * 4) assumes nf * 8 == 512 (i.e. nf == 64)
  and 128x128 images.
  """

  def __init__(self, in_nc, nf):
    super(Discriminator_VGG_128, self).__init__()
    # Stage 0: the only conv with a bias, applied to the raw image (no norm
    # on the input), followed by the first stride-2 downsampling conv.
    self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
    self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)
    self.bn0_1 = nn.BatchNorm2d(nf, affine=True)
    # Stages 1-4: conv(3x3, s=1)+BN then conv(4x4, s=2)+BN per stage.
    # Attribute names and registration order match the hand-written layout
    # exactly, so state_dict keys are unchanged.
    widths = [nf, nf * 2, nf * 4, nf * 8, nf * 8]
    for stage in range(1, 5):
      cin, cout = widths[stage - 1], widths[stage]
      setattr(self, 'conv%d_0' % stage,
              nn.Conv2d(cin, cout, 3, 1, 1, bias=False))
      setattr(self, 'bn%d_0' % stage, nn.BatchNorm2d(cout, affine=True))
      setattr(self, 'conv%d_1' % stage,
              nn.Conv2d(cout, cout, 4, 2, 1, bias=False))
      setattr(self, 'bn%d_1' % stage, nn.BatchNorm2d(cout, affine=True))

    # Classifier head over the flattened [512, 4, 4] feature map.
    self.linear1 = nn.Linear(512 * 4 * 4, 100)
    self.linear2 = nn.Linear(100, 1)

    # Shared activation for every layer.
    self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

  def forward(self, x):
    """Return a [N, 1] realness score for a [N, in_nc, 128, 128] batch."""
    fea = self.lrelu(self.conv0_0(x))
    fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))
    for stage in range(1, 5):
      for half in ('0', '1'):
        conv = getattr(self, 'conv%d_%s' % (stage, half))
        bn = getattr(self, 'bn%d_%s' % (stage, half))
        fea = self.lrelu(bn(conv(fea)))
    fea = fea.view(fea.size(0), -1)
    fea = self.lrelu(self.linear1(fea))
    return self.linear2(fea)


class Discriminator_VGG_256(nn.Module):
  """VGG-style discriminator head for 256x256 inputs.

  Six stride-2 stages shrink the input to a 4x4 feature map; the hard-coded
  `linear1` input size (512 * 4 * 4) assumes nf * 8 == 512 (i.e. nf == 64)
  and 256x256 images.
  """

  def __init__(self, in_nc, nf):
    super(Discriminator_VGG_256, self).__init__()
    # Stage 0: the only conv with a bias, applied to the raw image (no norm
    # on the input), followed by the first stride-2 downsampling conv.
    self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
    self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)
    self.bn0_1 = nn.BatchNorm2d(nf, affine=True)
    # Stages 1-5: conv(3x3, s=1)+BN then conv(4x4, s=2)+BN per stage.
    # Attribute names and registration order match the hand-written layout
    # exactly, so state_dict keys are unchanged.
    widths = [nf, nf * 2, nf * 4, nf * 8, nf * 8, nf * 8]
    for stage in range(1, 6):
      cin, cout = widths[stage - 1], widths[stage]
      setattr(self, 'conv%d_0' % stage,
              nn.Conv2d(cin, cout, 3, 1, 1, bias=False))
      setattr(self, 'bn%d_0' % stage, nn.BatchNorm2d(cout, affine=True))
      setattr(self, 'conv%d_1' % stage,
              nn.Conv2d(cout, cout, 4, 2, 1, bias=False))
      setattr(self, 'bn%d_1' % stage, nn.BatchNorm2d(cout, affine=True))

    # Classifier head over the flattened [512, 4, 4] feature map.
    self.linear1 = nn.Linear(512 * 4 * 4, 100)
    self.linear2 = nn.Linear(100, 1)

    # Shared activation for every layer.
    self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

  def forward(self, x):
    """Return a [N, 1] realness score for a [N, in_nc, 256, 256] batch."""
    fea = self.lrelu(self.conv0_0(x))
    fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))
    for stage in range(1, 6):
      for half in ('0', '1'):
        conv = getattr(self, 'conv%d_%s' % (stage, half))
        bn = getattr(self, 'bn%d_%s' % (stage, half))
        fea = self.lrelu(bn(conv(fea)))
    fea = fea.view(fea.size(0), -1)
    fea = self.lrelu(self.linear1(fea))
    return self.linear2(fea)


class Discriminator_VGG_512(nn.Module):
  """VGG-style discriminator head for 512x512 inputs.

  Seven stride-2 stages shrink the input to a 4x4 feature map; the hard-coded
  `linear1` input size (512 * 4 * 4) assumes nf * 8 == 512 (i.e. nf == 64)
  and 512x512 images.
  """

  def __init__(self, in_nc, nf):
    super(Discriminator_VGG_512, self).__init__()
    # Stage 0: the only conv with a bias, applied to the raw image (no norm
    # on the input), followed by the first stride-2 downsampling conv.
    self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
    self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)
    self.bn0_1 = nn.BatchNorm2d(nf, affine=True)
    # Stages 1-6: conv(3x3, s=1)+BN then conv(4x4, s=2)+BN per stage.
    # Attribute names and registration order match the hand-written layout
    # exactly, so state_dict keys are unchanged.
    widths = [nf, nf * 2, nf * 4, nf * 8, nf * 8, nf * 8, nf * 8]
    for stage in range(1, 7):
      cin, cout = widths[stage - 1], widths[stage]
      setattr(self, 'conv%d_0' % stage,
              nn.Conv2d(cin, cout, 3, 1, 1, bias=False))
      setattr(self, 'bn%d_0' % stage, nn.BatchNorm2d(cout, affine=True))
      setattr(self, 'conv%d_1' % stage,
              nn.Conv2d(cout, cout, 4, 2, 1, bias=False))
      setattr(self, 'bn%d_1' % stage, nn.BatchNorm2d(cout, affine=True))

    # Classifier head over the flattened [512, 4, 4] feature map.
    self.linear1 = nn.Linear(512 * 4 * 4, 100)
    self.linear2 = nn.Linear(100, 1)

    # Shared activation for every layer.
    self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

  def forward(self, x):
    """Return a [N, 1] realness score for a [N, in_nc, 512, 512] batch."""
    fea = self.lrelu(self.conv0_0(x))
    fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))
    for stage in range(1, 7):
      for half in ('0', '1'):
        conv = getattr(self, 'conv%d_%s' % (stage, half))
        bn = getattr(self, 'bn%d_%s' % (stage, half))
        fea = self.lrelu(bn(conv(fea)))
    fea = fea.view(fea.size(0), -1)
    fea = self.lrelu(self.linear1(fea))
    return self.linear2(fea)


class VGGFeatureExtractor(nn.Module):
  """Frozen, truncated VGG-19 for perceptual-feature extraction.

  Keeps torchvision's pretrained VGG-19 feature layers up to and including
  `feature_layer` and freezes their weights.  When `use_input_norm` is set,
  the input (assumed in [0, 1]) is normalized with the ImageNet mean/std
  before the network runs.
  """

  def __init__(self, feature_layer=34, use_bn=False, use_input_norm=True):
    super(VGGFeatureExtractor, self).__init__()
    self.use_input_norm = use_input_norm
    vgg = (torchvision.models.vgg19_bn(pretrained=True) if use_bn
           else torchvision.models.vgg19(pretrained=True))
    if self.use_input_norm:
      # ImageNet statistics, shaped to broadcast over [N, 3, H, W] inputs.
      # [0.485 - 1, 0.456 - 1, 0.406 - 1] if input in range [-1, 1]
      mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
      # [0.229 * 2, 0.224 * 2, 0.225 * 2] if input in range [-1, 1]
      std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
      self.register_buffer('mean', mean)
      self.register_buffer('std', std)
    kept = list(vgg.features.children())[:feature_layer + 1]
    self.features = nn.Sequential(*kept)
    # The extractor is fixed: no gradients flow into the VGG weights.
    for param in self.features.parameters():
      param.requires_grad = False

  def forward(self, x):
    # Assume input range is [0, 1]
    if self.use_input_norm:
      dev = x.device
      x = (x - self.mean.to(dev)) / self.std.to(dev)
    return self.features(x)
