# -*- coding: utf-8 -*-
"""
@Time: 2021/11/23 9:57
@Author: 鹄望潇湘
@File: vgg_16.py
@desc: 
"""
import torch.nn as nn
import torch
import numpy

class VGG16(nn.Module):
    """VGG16-style convolutional classifier for 30-channel inputs.

    Expects input of shape (N, 30, 64, 64): four 2x2 max-pools reduce the
    spatial size 64 -> 32 -> 16 -> 8 -> 4, matching the 128*4*4 flattened
    feature vector consumed by the classifier head, which emits 124 logits.

    :param init_weights: when True, apply Kaiming-normal initialization to
        conv layers (and constant init to any transposed convs).
    """

    def __init__(self, init_weights: bool = True):
        super(VGG16, self).__init__()

        # Block 1: 30 -> 64 channels; 64x64 -> 32x32 after pooling.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=30, out_channels=64, kernel_size=3, stride=1, padding=1),  # 64x64x64
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),  # 64x64x64
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))  # 64x32x32

        # Block 2: 64 -> 128 channels; 32x32 -> 16x16.
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, out_channels=128, kernel_size=3, stride=1, padding=1),  # 128x32x32
            nn.ReLU(inplace=True),
            nn.Conv2d(128, out_channels=128, kernel_size=3, stride=1, padding=1),  # 128x32x32
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))  # 128x16x16

        # Block 3: 128 -> 256 channels; 16x16 -> 8x8.
        self.conv3 = nn.Sequential(
            nn.Conv2d(128, out_channels=256, kernel_size=3, padding=1, stride=1),  # 256x16x16
            nn.ReLU(inplace=True),
            nn.Conv2d(256, out_channels=256, kernel_size=3, padding=1, stride=1),  # 256x16x16
            nn.ReLU(inplace=True),
            nn.Conv2d(256, out_channels=256, kernel_size=3, stride=1, padding=1),  # 256x16x16
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))  # 256x8x8

        # Block 4: 256 -> 512 channels; 8x8 -> 4x4.
        self.conv4 = nn.Sequential(
            nn.Conv2d(256, out_channels=512, kernel_size=3, padding=1, stride=1),  # 512x8x8
            nn.ReLU(inplace=True),
            nn.Conv2d(512, out_channels=512, kernel_size=3, padding=1, stride=1),  # 512x8x8
            nn.ReLU(inplace=True),
            nn.Conv2d(512, out_channels=512, kernel_size=3, stride=1, padding=1),  # 512x8x8
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))  # 512x4x4

        # Block 6: 1x1 convs acting as a bottleneck head, 512 -> 1024 -> 1024 -> 128.
        # (Named conv6 in the original design; there is no conv5 — kept for
        # state-dict compatibility with existing checkpoints.)
        self.conv6 = nn.Sequential(
            nn.Conv2d(512, out_channels=1024, kernel_size=1, padding=0, stride=1),  # 1024x4x4
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(1024, out_channels=1024, kernel_size=1, padding=0, stride=1),  # 1024x4x4
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Conv2d(1024, out_channels=128, kernel_size=1, padding=0, stride=1),  # 128x4x4
            nn.ReLU(inplace=True),
        )

        # Classifier head: 128*4*4 -> 4096 -> 124 logits.
        # NOTE(review): there is no activation between the two Linear layers,
        # so they compose to a single affine map — confirm this is intended.
        self.fc1 = nn.Sequential(
            nn.Linear(128 * 4 * 4, 4096),
            nn.Linear(4096, 124)
        )

        if init_weights:
            self.__initialize_weights()

    def forward(self, x):
        """Run the network; x is (N, 30, 64, 64), returns (N, 124) logits."""
        x = self.conv1(x)        # first conv block
        x = self.conv2(x)        # second conv block
        conv3_x = self.conv3(x)  # third conv block
        conv4_x = self.conv4(conv3_x)  # fourth conv block
        conv6_x = self.conv6(conv4_x)  # 1x1-conv bottleneck head

        x = torch.flatten(conv6_x, start_dim=1)  # (N, 128*4*4)
        x = self.fc1(x)  # classifier head
        return x

    def restore_dict(self, dict_path: str, device='cpu'):
        """Load a state dict saved from this architecture.

        :param dict_path: path to a file produced by torch.save(model.state_dict()).
        :param device: map_location passed to torch.load (default 'cpu').
        """
        parameter_dict = torch.load(dict_path, map_location=device)
        self.load_state_dict(parameter_dict)

    def __initialize_weights(self):
        # Kaiming-normal for convs (ReLU nonlinearity); small constants for
        # any transposed convs (none in this class, kept for subclasses).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                torch.nn.init.constant_(m.weight, 0.001)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)

    def __bilinear_kernel(self, in_channels, out_channels, kernel_size):
        """Build a bilinear-interpolation kernel of shape
        (in_channels, out_channels, kernel_size, kernel_size) as a float64 tensor.

        NOTE(review): numpy.full broadcasts the same 2-D bilinear filter into
        every (in, out) channel pair; the conventional bilinear upsampling
        init places the filter only on the channel diagonal — confirm the
        dense fill is intended before using this for deconv initialization.

        :param in_channels: number of input channels.
        :param out_channels: number of output channels.
        :param kernel_size: spatial size of the square kernel.
        """
        factor = (kernel_size + 1) // 2
        if kernel_size % 2 == 1:
            center = factor - 1
        else:
            center = factor - 0.5
        og = numpy.ogrid[:kernel_size, :kernel_size]
        element = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
        # Bug fix: numpy.float was deprecated in NumPy 1.20 and removed in
        # 1.24 (AttributeError); numpy.float64 is the equivalent dtype.
        weights = numpy.full(shape=(in_channels, out_channels, kernel_size, kernel_size),
                             fill_value=element, dtype=numpy.float64)

        return torch.from_numpy(weights)


if __name__ == '__main__':
    # Smoke test. The classifier head expects 128*4*4 flattened features,
    # which requires a 64x64 spatial input (four 2x2 max-pools -> 4x4).
    # The previous (3, 30, 240, 300) input produced a 128x15x18 feature map
    # and crashed in fc1 with a shape mismatch.
    x = torch.randn(3, 30, 64, 64)
    vgg16 = VGG16()
    y = vgg16(x)
    print(y)
