# Modified  from https://github.com/xinntao/BasicSR
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.module_util as mutil
import torchvision


###############################################################
# 生成器网络
# NRP network
# These purifiers are based on DenseNet-style (around 14 million parameters) and ResNet-style (only 1.2 million parameters) architectures.
# RDB(Residual Dense Block)
# 相当于将Residual block (ResBlock)与Denseblock密集块相结合
# 通过密集连通卷积层提取丰富的局部特征
# 从先前RDB的状态直接连接到当前RDB的所有层
# 然后利用RDB的局部特征融合自适应地从先前和当前的局部特征中学习更有效的特征, 使训练更加稳定。
 
# 每个RDB块都有5个卷积, 然后通过torch.cat函数, 将卷积的通道数相叠加
# 所以卷积的通道数由num_feat, 叠加变成num_feat + 4 * num_grow_ch
# 利用最后一个卷积将通道数调整为num_feat , 通过x5 * 0.2 + x构建残差边。

# 参考SRGAN
class ResidualDenseBlock_5C(nn.Module):
    '''
    Residual Dense Block with five convolutions.

    Used in the RRDB block in ESRGAN. Convolutions 1-4 each consume the
    concatenation of the block input and every previously produced feature
    map (dense connectivity), so the k-th conv sees nf + (k-1)*gc input
    channels and emits gc new channels. The fifth conv fuses the full
    concatenation back down to nf channels, and the result joins the input
    through a residual connection scaled by 0.2.

    Args:
        nf: num_feat (int), Channel number of intermediate features.
        gc: num_grow_ch (int), Channels for each growth.
        bias (bool): whether the convolutions carry a bias term.
    '''

    def __init__(self, nf=64, gc=32, bias=True):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc is the growth channel count added by each dense step.
        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)
        # Fusion conv: nf + 4*gc concatenated channels back to nf.
        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        # Accumulate dense features: each conv sees everything produced so far.
        features = [x]
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            features.append(self.lrelu(conv(torch.cat(features, 1))))
        fused = self.conv5(torch.cat(features, 1))
        # Empirically, scaling the residual by 0.2 gives better performance.
        return fused * 0.2 + x


# RRDB (Residual in Residual Dense Block)
# 采用两层残差结构，RRDB结构由一个大的残差结构构成，主干部分由3个 RDB(Residual Dense Block)密集残差块构成，将主干网络的输出与残差边叠加。
# 网络由多个basic block构成, 每个basicblock 都由RRDB组成, 其中RRDB即在级联3个RDB的基础上添加残差边
class RRDB(nn.Module):
    '''
    Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN. Three residual dense blocks are chained in
    sequence, and the chain output rejoins the block input through a
    residual connection scaled by 0.2 (a two-level residual structure).

    Args:
        nf: num_feat (int), Channel number of intermediate features.
        gc: num_grow_ch (int), Channels for each growth.
    '''

    def __init__(self, nf, gc=32):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nf, gc)
        self.RDB2 = ResidualDenseBlock_5C(nf, gc)
        self.RDB3 = ResidualDenseBlock_5C(nf, gc)

    def forward(self, x):
        out = x
        for block in (self.RDB1, self.RDB2, self.RDB3):
            out = block(out)
        # Empirically, scaling the residual by 0.2 gives better performance.
        return out * 0.2 + x


class NRP(nn.Module):
    '''
    RRDB-based purifier generator (DenseNet-style variant).

    A first 3x3 convolution lifts the input to nf feature channels, a trunk
    of nb RRDB blocks refines the features, and a final 3x3 convolution
    projects back to out_nc image channels.

    Args:
        in_nc: num_in_ch (int), Channel number of inputs.
        out_nc: num_out_ch (int), Channel number of outputs.
        nf: num_feat (int), Channel number of intermediate features.
            Default: 64
        nb: num_block (int), Block number in the trunk network. Defaults: 23
        gc: num_grow_ch (int), Channels for each growth. Default: 32.
    '''

    def __init__(self, in_nc, out_nc, nf, nb, gc=32):
        super(NRP, self).__init__()
        # Pre-bind nf/gc so make_layer can instantiate RRDB without args.
        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)

        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
        self.RRDB_trunk = mutil.make_layer(RRDB_block_f, nb)
        # BUG FIX: the output channel count was hard-coded to 3, silently
        # ignoring the out_nc constructor argument. Honor the parameter
        # (identical behavior for the common out_nc=3 case).
        self.trunk_conv = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

    def forward(self, x):
        fea = self.conv_first(x)
        trunk = self.trunk_conv(self.RRDB_trunk(fea))

        return trunk


#################################################################
# NRP based on ResNet Generator
# These purifiers are based on DenseNet-style (around 14 million parameters) and ResNet-style (only 1.2 million parameters) architectures.
# 参考SRResNet
class NRP_resG(nn.Module):
    '''
    ResNet-based purifier generator (the lighter NRP variant, ~1.2M params).

    Mirrors the SRResNet layout: a first convolution plus LeakyReLU lifts
    the input to nf channels, a trunk of nb batchnorm-free residual blocks
    refines the features, and a last convolution maps back to out_nc
    channels.

    Args:
        in_nc (int): channel number of inputs. Default: 3.
        out_nc (int): channel number of outputs. Default: 3.
        nf (int): channel number of intermediate features. Default: 64.
        nb (int): number of residual blocks in the trunk. Default: 23.
    '''

    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=23):
        super(NRP_resG, self).__init__()

        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
        # functools.partial pre-binds nf so make_layer can build each
        # ResidualBlock_noBN without passing extra arguments.
        res_block = functools.partial(mutil.ResidualBlock_noBN, nf=nf)
        self.recon_trunk = mutil.make_layer(res_block, nb)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x):
        features = self.lrelu(self.conv_first(x))
        return self.conv_last(self.recon_trunk(features))

#################################################################
# 鉴别器网络
# It consists of five convolutional blocks containing convolutional layers followed by batch-norm and leaky-relu and then a fully connected layer
# 输入为一张图片, 输出为一个数, 其输入尺寸必须为 128 * 128 
# 其中经过 10 次卷积将图片不断下采样到 4 * 4
# 通道变为num_feat*8, 通过view函数将卷积结果展开成一维向量, 再经过两个全连接层, 输出一个数。
class Critic(nn.Module):  # class VGGStyleDiscriminator128(nn.Module):
    """VGG style discriminator with input size 128 x 128.

    It is used to train SRGAN and ESRGAN. Five convolutional stages (each a
    3x3 conv followed by a stride-2 4x4 conv, with batch norm and LeakyReLU)
    downsample a 128x128 input to 4x4 with num_feat*8 channels; two fully
    connected layers then reduce the flattened features to a single score.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_feat (int): Channel number of base intermediate features.
            Default: 64.
    """

    def __init__(self, num_in_ch, num_feat):
        super(Critic, self).__init__()
        # Stage 0: the only conv with a bias; no batch norm after conv0_0.
        self.conv0_0 = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1, bias=True)
        self.conv0_1 = nn.Conv2d(num_feat, num_feat, 4, 2, 1, bias=False)
        self.bn0_1 = nn.BatchNorm2d(num_feat, affine=True)

        # Stages 1-4: 3x3 conv (channel change) then stride-2 4x4 conv
        # (spatial halving), each followed by batch norm. Width multipliers
        # relative to num_feat; attribute names match the original layout so
        # state_dict keys are unchanged.
        stage_widths = [(1, 2), (2, 4), (4, 8), (8, 8)]
        for idx, (w_in, w_out) in enumerate(stage_widths, start=1):
            setattr(self, f'conv{idx}_0',
                    nn.Conv2d(num_feat * w_in, num_feat * w_out, 3, 1, 1, bias=False))
            setattr(self, f'bn{idx}_0', nn.BatchNorm2d(num_feat * w_out, affine=True))
            setattr(self, f'conv{idx}_1',
                    nn.Conv2d(num_feat * w_out, num_feat * w_out, 4, 2, 1, bias=False))
            setattr(self, f'bn{idx}_1', nn.BatchNorm2d(num_feat * w_out, affine=True))

        # Head: flatten the 4x4 feature map, then two linear layers -> scalar.
        self.linear1 = nn.Linear(num_feat * 8 * 4 * 4, 100)
        self.linear2 = nn.Linear(100, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        # The fully connected head fixes the expected input resolution.
        assert x.size(2) == 128 and x.size(3) == 128, (f'Input spatial size must be 128x128, but received {x.size()}.')

        feat = self.lrelu(self.conv0_0(x))
        feat = self.lrelu(self.bn0_1(self.conv0_1(feat)))   # -> (num_feat, 64, 64)

        # Stages 1-4 halve the spatial size each time: 64 -> 32 -> 16 -> 8 -> 4.
        for idx in range(1, 5):
            conv_a = getattr(self, f'conv{idx}_0')
            bn_a = getattr(self, f'bn{idx}_0')
            conv_b = getattr(self, f'conv{idx}_1')
            bn_b = getattr(self, f'bn{idx}_1')
            feat = self.lrelu(bn_a(conv_a(feat)))
            feat = self.lrelu(bn_b(conv_b(feat)))

        # spatial size: (4, 4) -> flatten to num_feat * 8 * 4 * 4 per sample.
        feat = feat.view(feat.size(0), -1)
        feat = self.lrelu(self.linear1(feat))   # FC + activation
        return self.linear2(feat)               # final FC, no activation

'''
class Discriminator(nn.Module):
    def __init__(self, input_shape):
        super(Discriminator, self).__init__()

        self.input_shape = input_shape
        in_channels, in_height, in_width = self.input_shape
        patch_h, patch_w = int(in_height / 2 ** 4), int(in_width / 2 ** 4)
        self.output_shape = (1, patch_h, patch_w)

        def discriminator_block(in_filters, out_filters, first_block=False):
            layers = []
            layers.append(nn.Conv2d(in_filters, out_filters, kernel_size=3, stride=1, padding=1))
            if not first_block:
                layers.append(nn.BatchNorm2d(out_filters))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            layers.append(nn.Conv2d(out_filters, out_filters, kernel_size=3, stride=2, padding=1))
            layers.append(nn.BatchNorm2d(out_filters))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        layers = []
        in_filters = in_channels
        for i, out_filters in enumerate([64, 128, 256, 512]):
            layers.extend(discriminator_block(in_filters, out_filters, first_block=(i == 0)))
            in_filters = out_filters

        layers.append(nn.Conv2d(out_filters, 1, kernel_size=3, stride=1, padding=1))

        self.model = nn.Sequential(*layers)

    def forward(self, img):
        return self.model(img)
'''


#################################################################
# 特征提取
# VGG [42] network pretrained on ImageNet.
# 浅层特征抽取网络，提取浅层特征。
# 低分辨率图像进入后会经过一个卷积+RELU函数，将输入通道数调整为64??
class FeatureExtractor(nn.Module):
    """Perceptual feature extractor built from ImageNet-pretrained VGG16.

    Keeps the first ``ssp_layer`` modules of ``vgg16.features`` (the
    convolutional stack that precedes the classifier) and applies them
    as-is. NOTE(review): the parameters are not frozen here; callers that
    want a fixed extractor must disable gradients themselves.

    Args:
        ssp_layer (int): number of leading VGG16 feature layers to use
            for the perceptual (SSP) loss. Default in the caller: 16.
    """

    def __init__(self, ssp_layer):
        super(FeatureExtractor, self).__init__()
        # Other backbones could be substituted here; pretrained=True loads
        # the ImageNet weights rather than just the architecture.
        vgg16 = torchvision.models.vgg16(pretrained=True)
        # Truncate the convolutional stack to the first ssp_layer modules.
        kept_layers = list(vgg16.features)[:ssp_layer]
        self.feature_extractor = nn.Sequential(*kept_layers)

    def forward(self, img):
        # Forward the image through the truncated VGG16 feature stack.
        return self.feature_extractor(img)

'''
class FeatureExtractor(nn.Module):
    def __init__(self):
        super(FeatureExtractor, self).__init__()
        vgg19_model = vgg19(pretrained=True)
        self.vgg19_54 = nn.Sequential(*list(vgg19_model.features.children())[:35])

    def forward(self, img):
        return self.vgg19_54(img)
'''

if __name__ == '__main__':
    # Smoke test: restore the pretrained ResNet purifier, run one forward
    # pass on a random image, and report output size and parameter count.
    netG = NRP_resG(3, 3, 64, 23)
    netG.load_state_dict(torch.load('pretrained_purifiers/NRP_resG.pth'))
    test_sample = torch.rand(1, 3, 256, 256)
    print(netG(test_sample).size())
    # Number of trainable parameters (should be ~1.2M for this variant).
    trainable_params = sum(p.numel() for p in netG.parameters() if p.requires_grad)
    print(trainable_params)