''' Initial network construction, based on reference code for plug-and-play GAN (PPGN). (Wang Yao, 2021-09-17) '''

import torch
import torchvision
import torch.nn as nn
import torchvision.transforms as transforms

# Pretrained AlexNet (ImageNet weights), shared by the encoder below.
# Weights are downloaded at import time on first use.
# NOTE(review): `pretrained=True` is deprecated in newer torchvision in
# favor of the `weights=` argument — confirm against the installed version.
alexnet = torchvision.models.alexnet(pretrained=True)


# E network: encode an image x into feature codes h and h1
class EncPPGN(nn.Module):
    """Encoder E: map an input image ``x`` to feature codes ``h`` and ``h1``.

    Built from the pretrained AlexNet:
      * ``h1`` is the convolutional feature map (output of the ``features``
        trunk, up to and including the conv5 ReLU).
      * ``h`` is the fc6 activation, obtained by pooling and flattening
        ``h1`` and passing it through the first Linear+ReLU of AlexNet's
        classifier.

    Returns:
        tuple: ``(h, h1)``.
    """

    def __init__(self):
        # Bug fix: nn.Module.__init__ must run before any submodule is
        # assigned, otherwise PyTorch raises
        # "cannot assign module before Module.__init__() call".
        super(EncPPGN, self).__init__()

        # Conv trunk of AlexNet: children [:12] stop just before the final
        # MaxPool2d, i.e. the output is the conv5 ReLU feature map.
        self.features = nn.Sequential(
            *list(alexnet.features.children())[:12]
        )

        # fc6 + ReLU from AlexNet's classifier (index 0 is Dropout, skipped).
        self.classifier = nn.Sequential(
            *list(alexnet.classifier.children())[1:3]
        )

    def forward(self, data):
        # h1: conv5 feature map of the input image.
        h1 = self.features(data)

        # Bug fix: the original applied self.classifier directly to the raw
        # image `data`; fc6 expects the flattened conv features instead.
        # Replicate the AlexNet layers omitted from self.features:
        # the final MaxPool2d(3, 2) and AdaptiveAvgPool2d((6, 6)), giving
        # 256*6*6 = 9216 inputs for fc6.
        pooled = nn.functional.max_pool2d(h1, kernel_size=3, stride=2)
        pooled = nn.functional.adaptive_avg_pool2d(pooled, (6, 6))
        h = self.classifier(torch.flatten(pooled, 1))

        return h, h1


# G network: generate an image x from a code h (the GAN generator)
class GenPPGN(nn.Module):
    """Generator G: map a 4096-d feature code ``h`` to a 3x227x227 image.

    Three bias-free fully-connected layers expand/refine the code, which is
    reshaped to a 256x4x4 map and upsampled by six stride-2 transposed
    convolutions (4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256), then center-cropped
    to the AlexNet input resolution of 227x227.
    """

    def __init__(self):
        super(GenPPGN, self).__init__()

        # Three Linear(4096, 4096, bias=False) + ReLU stages.
        fc_stages = []
        for _ in range(3):
            fc_stages.append(nn.Linear(4096, 4096, bias=False))
            fc_stages.append(nn.ReLU())
        self.fc = nn.Sequential(*fc_stages)

        def up(c_in, c_out):
            # One 2x upsampling stage: transposed conv + ReLU.
            return [
                nn.ConvTranspose2d(c_in, c_out, kernel_size=4, stride=2, padding=1, bias=True),
                nn.ReLU(),
            ]

        def refine(c_in, c_out):
            # One same-resolution 3x3 conv + ReLU.
            return [
                nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=True),
                nn.ReLU(),
            ]

        self.deconv = nn.Sequential(
            # @todo(wangyao): input and output dimensions
            # deconv5, conv5
            *up(256, 256),
            *refine(256, 512),
            # deconv4, conv4
            *up(512, 256),
            *refine(256, 256),
            # deconv3, conv3
            *up(256, 128),
            *refine(128, 128),
            # deconv2
            *up(128, 64),
            # deconv1
            *up(64, 32),
            # deconv0 — final layer, no ReLU so pixel values are unbounded.
            nn.ConvTranspose2d(32, 3, kernel_size=4, stride=2, padding=1, bias=True),
        )

    def forward(self, h):
        out = self.fc(h)
        out = out.view(h.size(0), 256, 4, 4)
        out = self.deconv(out)
        # Center-crop the 256x256 output down to 227x227 (AlexNet input size).
        return out[:, :, 14:241, 14:241]


def ppgn_loss_img(img_fake, img_real):
    """Pixel-wise mean-squared-error reconstruction loss between two images.

    Bug fix: the original passed the difference tensor to the MSELoss
    *constructor* (``nn.MSELoss(img_fake - img_real)``) — that tensor was
    swallowed as a constructor argument and no loss was ever computed.
    The criterion must be instantiated, then called with both tensors.
    (Also normalizes the stray 5-space indentation.)
    """
    return nn.MSELoss()(img_fake, img_real)

def ppgn_loss_fn(outputs, labels):
    """Classification loss: standard cross-entropy over class logits."""
    criterion = nn.CrossEntropyLoss()
    return criterion(outputs, labels)