# -*- coding: utf-8 -*-
"""
@Time 2020/12/14 14:18
@Author 鹄望潇湘
@File FCN_VGG16.py
@Desc 包含FCN_VGG16网络的主体结构（FCN_VGG16类)以及所使用的Loss函数(PixelCrossEntropyLoss类)
"""

import torch.nn as nn
import torch
import numpy


class FCN_VGG16(nn.Module):
    """
    Fully convolutional network (FCN) for semantic segmentation, built on a
    VGG16 backbone.

    Compared with plain VGG16, the fully connected layers are removed and
    replaced by 1x1 convolutions (``conv6``) followed by three
    transposed-convolution upsampling stages. Skip connections add the
    ``conv4`` and ``conv3`` feature maps back in before the final 8x
    upsampling (FCN-8s style), and the output has 22 channels (one score map
    per class).

    The module also creates its own SGD optimizer and pixel-wise
    cross-entropy loss (``PixelCrossEntropyLoss``).

    :param init_weights: whether to initialize the weight parameters

    """
    def __init__(self, init_weights=True):
        super(FCN_VGG16, self).__init__()

        # Stage 1: 3 -> 64 channels, output H/2 x W/2.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))

        # Stage 2: 64 -> 128 channels, output H/4 x W/4.
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))

        # Stage 3: 128 -> 256 channels, output H/8 x W/8 (kept for a skip connection).
        self.conv3 = nn.Sequential(
            nn.Conv2d(128, out_channels=256, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, out_channels=256, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))

        # Stage 4: 256 -> 512 channels, output H/16 x W/16 (kept for a skip connection).
        self.conv4 = nn.Sequential(
            nn.Conv2d(256, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0))

        # Stage 5: 512 -> 512 channels, output H/32 x W/32.
        self.conv5 = nn.Sequential(
            nn.Conv2d(512, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )

        # Stage 6: 1x1 convolutions replacing VGG16's fully connected layers;
        # ends with 22 channels (per-class scores) at H/32 x W/32.
        self.conv6 = nn.Sequential(
            nn.Dropout(0.5),
            nn.Conv2d(512, out_channels=4096, kernel_size=1, padding=0, stride=1),
            nn.ReLU(inplace=True),

            nn.Dropout(0.5),
            nn.Conv2d(4096, out_channels=4096, kernel_size=1, padding=0, stride=1),
            nn.ReLU(inplace=True),

            nn.Conv2d(4096, out_channels=22, kernel_size=1, padding=0, stride=1),
            nn.ReLU(inplace=True),
        )

        # 2x upsampling to H/16 x W/16 with 512 channels (matches conv4 output).
        self.deconvolution_2_1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=22, out_channels=512, kernel_size=3, stride=2, padding=1,
                               output_padding=1, dilation=1),
            nn.ReLU(inplace=True))

        # 2x upsampling to H/8 x W/8 with 256 channels (matches conv3 output).
        self.deconvolution_2_2 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=3, stride=2, padding=1,
                               output_padding=1, dilation=1),
            nn.ReLU(inplace=True))

        # Final 8x upsampling back to the input resolution, 22 class maps.
        self.deconvolution_8 = nn.ConvTranspose2d(in_channels=256, out_channels=22, kernel_size=8, stride=8, padding=0)

        if init_weights:
            self.__initialize_weights()

        # NOTE(review): dampening=0.9 with momentum=0.0016 is unusual —
        # dampening only scales the momentum term, so these two values look
        # swapped (momentum=0.9 is the common choice). Kept as-is; confirm
        # against the training results before changing.
        self.optimizer = torch.optim.SGD(self.parameters(), lr=0.3, dampening=0.9, momentum=0.0016)
        self.loss_function = PixelCrossEntropyLoss()

    def forward(self, x):
        """
        Run a forward pass.

        :param x: input image batch, shape (N, 3, H, W); H and W should be
            multiples of 32 so the skip-connection shapes line up
        :return: per-class score maps, shape (N, 22, H, W)
        """
        x = self.conv1(x)
        x = self.conv2(x)
        conv3_x = self.conv3(x)        # kept for the H/8 skip connection
        conv4_x = self.conv4(conv3_x)  # kept for the H/16 skip connection
        conv5_x = self.conv5(conv4_x)
        conv6_x = self.conv6(conv5_x)  # coarse class scores at H/32

        x = self.deconvolution_2_1(conv6_x)  # 2x upsample to H/16
        x = torch.add(x, conv4_x)            # fuse with conv4 features
        x = self.deconvolution_2_2(x)        # 2x upsample to H/8
        x = torch.add(x, conv3_x)            # fuse with conv3 features

        x = self.deconvolution_8(x)  # 8x upsample back to the input size
        return x

    def restore_dict(self, dict_path: str, device='cpu'):
        """
        Load model weights from a saved state dict.

        :param dict_path: path of the state-dict file
        :param device: map location passed to ``torch.load``
        """
        parameter_dict = torch.load(dict_path, map_location=device)
        self.load_state_dict(parameter_dict)

    def __initialize_weights(self):
        """Kaiming-initialize Conv2d layers; constant-initialize ConvTranspose2d layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.ConvTranspose2d):
                torch.nn.init.constant_(m.weight, 0.001)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)

    def __bilinear_kernel(self, in_channels, out_channels, kernel_size):
        """
        Build a bilinear-upsampling weight tensor for a transposed convolution.

        Currently not called by ``__initialize_weights`` (deconv weights are
        set to a constant instead); kept as an alternative initializer.

        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param kernel_size: square kernel size
        :return: float64 tensor of shape
            (in_channels, out_channels, kernel_size, kernel_size)
        """
        factor = (kernel_size + 1) // 2
        if kernel_size % 2 == 1:
            center = factor - 1
        else:
            center = factor - 0.5
        og = numpy.ogrid[:kernel_size, :kernel_size]
        # Outer product of two 1-D triangular (bilinear) profiles.
        element = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
        # BUG FIX: ``numpy.float`` was deprecated in NumPy 1.20 and removed in
        # 1.24 (AttributeError); use the explicit ``numpy.float64`` dtype.
        weights = numpy.full(shape=(in_channels, out_channels, kernel_size, kernel_size),
                             fill_value=element, dtype=numpy.float64)

        return torch.from_numpy(weights)


class PixelCrossEntropyLoss(nn.Module):
    """
    Pixel-wise cross-entropy loss: the cross entropy is computed over the
    class dimension for every pixel of the prediction. The prediction must be
    a 4-D tensor (batch, classes, height, width) and the target a 3-D tensor
    (batch, height, width) of integer class ids.

    Pixels whose target class is 1 are excluded from the loss
    (``ignore_index=1``).

    """
    def __init__(self):
        super(PixelCrossEntropyLoss, self).__init__()
        # NOTE(review): the original code built a per-class weight tensor here
        # (with values mixing 0.97 and 97 — almost certainly a typo) but never
        # passed it to CrossEntropyLoss, so it had no effect. The dead
        # computation has been removed; if class weighting is desired, pass
        # ``weight=...`` to CrossEntropyLoss below.
        self.cross_entropy = torch.nn.CrossEntropyLoss(ignore_index=1)

    def forward(self, predict: torch.Tensor, real_label: torch.Tensor):
        """
        Compute the mean pixel-wise cross entropy.

        :param predict: network output logits, a 4-D tensor (N, C, H, W)
        :type predict: torch.Tensor
        :param real_label: ground-truth class ids, a 3-D tensor (N, H, W)
        :return: scalar loss tensor
        """
        return self.cross_entropy(predict, real_label)

