# -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 14:57:53 2021

@author: dewol
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torchvision import models

"""
This class takes the layer of the resnet 50 netwerk as intput, and only keeps 
the first few layer and defines a forward pass for these
"""


class Resnet(nn.Module):
    """Truncated ResNet-50 backbone used as the CNN encoder.

    Wraps the early stages of a torchvision ResNet-50 (passed in as its
    list of child modules), replacing the stem convolution so that an
    arbitrary number of input channels is accepted instead of 3 RGB
    channels. The forward pass returns the intermediate feature maps so
    they can be used as skip connections by the decoder.
    """

    def __init__(self, in_channels, net_layers):
        super(Resnet, self).__init__()
        # The pretrained stem conv expects 3 RGB channels, so build a fresh
        # one that accepts `in_channels` instead.
        self.layer0 = nn.Conv2d(in_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)

        # Assumes net_layers == list(resnet50.children()):
        # [1]=bn1, [2]=relu, [3]=maxpool, [4]=layer1, [5]=layer2, [6]=layer3
        # — TODO confirm against the torchvision version in use.
        self.layer1 = nn.Sequential(*net_layers[1:3])
        self.layer2 = nn.Sequential(*net_layers[3:5])
        self.layer3 = nn.Sequential(*net_layers[5])
        self.layer4 = nn.Sequential(*net_layers[6])

    def forward(self, x):
        # Each stage's output is kept and returned as a skip connection.
        feat1 = self.layer1(self.layer0(x))
        feat2 = self.layer2(feat1)
        feat3 = self.layer3(feat2)
        feat4 = self.layer4(feat3)
        return feat1, feat2, feat3, feat4


"""
make the self attention structure

similar implementation as in lab 7.3
"""


class MSA(nn.Module):
    """Multi-head self-attention layer, as in the ViT encoder.

    Splits the hidden dimension over a fixed number of heads, computes
    scaled dot-product attention per head, and projects the concatenated
    head outputs back to the hidden size.
    """

    def __init__(self, hidden_size):
        super(MSA, self).__init__()

        # Fixed at 12 heads so the layer shapes match the pretrained
        # ViT-B weights loaded later.
        self.n_heads = 12
        self.k = hidden_size // self.n_heads

        inner = self.k * self.n_heads
        self.query = nn.Linear(hidden_size, inner)
        self.key = nn.Linear(hidden_size, inner)
        self.value = nn.Linear(hidden_size, inner)

        self.unifyheads = nn.Linear(inner, hidden_size)

    def forward(self, x):
        batch, tokens = x.shape[0], x.shape[1]
        heads, dim = self.n_heads, self.k

        # Project, split into heads, and fold the head axis into the batch
        # axis so a single bmm handles all heads at once.
        def split_heads(proj):
            return proj.view(batch, tokens, heads, dim).transpose(1, 2).reshape(batch * heads, tokens, dim)

        queries = split_heads(self.query(x))
        keys = split_heads(self.key(x))
        values = split_heads(self.value(x))

        # Scaled dot-product attention weights.
        scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(dim)
        attn = F.softmax(scores, dim=2)

        # Weighted sum of values, then merge the heads back together.
        merged = torch.bmm(attn, values).view(batch, heads, tokens, dim)
        merged = merged.transpose(1, 2).reshape(batch, tokens, heads * dim)
        return self.unifyheads(merged)


"""
make the multilayer perceptron. Again very similar as implemented in Lab7.3
"""


class MLP(nn.Module):
    """Two-layer feed-forward block used inside each transformer layer.

    Expands the hidden size to an intermediate dimension, applies ReLU,
    then projects back down to the hidden size.

    Args:
        hidden_size: input/output feature dimension.
        percep_dim: intermediate (expansion) dimension. Defaults to 3072,
            the size required by the pretrained ViT-B weights, so existing
            callers are unaffected; it is now a parameter instead of a
            hard-coded constant.
    """

    def __init__(self, hidden_size, percep_dim=3072):
        super(MLP, self).__init__()
        self.linear1 = nn.Linear(hidden_size, percep_dim)
        self.linear2 = nn.Linear(percep_dim, hidden_size)
        self.activate = nn.ReLU()

    def forward(self, x):
        # NOTE(review): dropout could be added after the activation and
        # after linear2; omitted here to keep the original behavior.
        x = self.linear1(x)
        x = self.activate(x)
        x = self.linear2(x)
        return x


"""
make the block with MSA and MLP with the residual connections and the layer 
this is the yellow block on the left of figure 1 in the paper

"""


class MSA_MLP_block(nn.Module):
    """One transformer encoder block (pre-norm style).

    Applies LayerNorm -> MSA with a residual connection, followed by
    LayerNorm -> MLP with a residual connection. This is the yellow block
    on the left of figure 1 in the TransUNet paper.
    """

    def __init__(self, hidden_size):
        super(MSA_MLP_block, self).__init__()

        self.h_size = hidden_size

        self.attent_layer = MSA(self.h_size)
        self.norm1 = nn.LayerNorm(self.h_size)

        self.percep_layer = MLP(self.h_size)
        self.norm2 = nn.LayerNorm(self.h_size)

    def forward(self, x):
        # Pre-norm residual attention: x + MSA(LN(x))
        x = x + self.attent_layer(self.norm1(x))
        # Pre-norm residual MLP: x + MLP(LN(x))
        x = x + self.percep_layer(self.norm2(x))
        return x


"""
make the transformer block, it consists of performing the MSA and MLP block 
multiple time, on the embedded input. This is the green block in figure 1
of the paper.

it is assumed for now that the images are square

Note that the paper pretraines on imagenet the transformer and CNN as a whole,
we use their pretrained transformer, but the trained CNN of pytorch, this can 
thus make a difference or create redundancy
"""


# Helper functions for converting loaded pretrained weights into nn.Parameter objects.
def reform(loaded_weights):
    """Convert a numpy weight array from the ViT checkpoint into an nn.Parameter.

    3-D kernel arrays (hidden, heads, head_dim) are flattened into a square
    matrix and transposed — the checkpoint appears to store kernels as
    (in, out) while nn.Linear expects (out, in); TODO confirm against the
    checkpoint layout. Lower-rank arrays (biases, LayerNorm scales) are
    flattened to a 1-D vector.

    The square side is derived from the element count instead of being
    hard-coded to 768, so checkpoints with other hidden sizes also load.
    """
    tensor = torch.from_numpy(loaded_weights)
    if tensor.ndim == 3:
        side = int(math.isqrt(tensor.numel()))
        reshap = tensor.reshape(side, side).t()
    else:
        reshap = tensor.reshape(-1)
    return nn.Parameter(reshap)


def change_totens(loaded_weights):
    """Wrap a numpy array in an nn.Parameter without reshaping it."""
    as_tensor = torch.from_numpy(loaded_weights)
    return nn.Parameter(as_tensor)


class Transformer(nn.Module):
    """ViT encoder: patch embedding plus stacked MSA/MLP blocks.

    This is the green block in figure 1 of the TransUNet paper. The input
    is the deepest ResNet feature map; it is embedded into patches, a
    learned positional embedding is added, and the result is run through
    `num` transformer blocks followed by a final LayerNorm.

    Args:
        hidden_size: transformer hidden dimension (768 for ViT-B).
        img_size: spatial size of the (square) image fed to the ResNet;
            the number of patches is derived from it.
        num: number of stacked MSA_MLP blocks (12 in the paper).
        patch_size: patch size in units of post-ResNet feature pixels.
        weights_path: path of the pretrained ViT .npz checkpoint loaded by
            init_weight(). Defaults to the original hard-coded path so
            existing callers keep working; it is now overridable instead
            of being baked in.
    """

    # Previous hard-coded checkpoint location, kept as the default.
    DEFAULT_WEIGHTS = '/home/jiayu/MyProject_2022/TransUNET_reproduction-main/imagenet21k_R50+ViT-B_16.npz'

    def __init__(self, hidden_size, img_size, num=12, patch_size=1, weights_path=None):
        super(Transformer, self).__init__()

        # Patch embedding. The ResNet front-end shrinks H and W; the code
        # assumes an overall shrink factor of 16 relative to img_size
        # (factor 8 from the CNN plus an input half the original size)
        # — TODO confirm against the Resnet stage actually used.
        shrink_factor = 16
        channels = 64 * shrink_factor  # channels coming out of the ResNet stage

        patch_size_2 = patch_size * shrink_factor
        num_patches = (img_size // patch_size_2) ** 2

        self.embed_patch = nn.Conv2d(channels, hidden_size, patch_size, patch_size)
        # Learned positional embedding, one vector per patch.
        self.embed_pos = nn.Parameter(torch.zeros(1, num_patches, hidden_size))

        self.layers = nn.ModuleList([MSA_MLP_block(hidden_size) for i in range(num)])

        self.norm = nn.LayerNorm(hidden_size)

        self.init_weight(weights_path)

    def init_weight(self, weights_path=None):
        """Load pretrained ViT encoder weights from an .npz checkpoint.

        Copies the attention (query/key/value/out), MLP and LayerNorm
        parameters of every encoder block from the checkpoint into this
        module. Falls back to DEFAULT_WEIGHTS when no path is given.
        """
        if weights_path is None:
            weights_path = self.DEFAULT_WEIGHTS
        w = np.load(weights_path)

        for j in range(len(self.layers)):
            trans_block = "Transformer/encoderblock_" + str(j)

            self.layers[j].attent_layer.query.weight = reform(
                w[trans_block + "/MultiHeadDotProductAttention_1/query/kernel"])
            self.layers[j].attent_layer.query.bias = reform(
                w[trans_block + "/MultiHeadDotProductAttention_1/query/bias"])

            self.layers[j].attent_layer.key.weight = reform(
                w[trans_block + "/MultiHeadDotProductAttention_1/key/kernel"])
            self.layers[j].attent_layer.key.bias = reform(w[trans_block + "/MultiHeadDotProductAttention_1/key/bias"])

            self.layers[j].attent_layer.value.weight = reform(
                w[trans_block + "/MultiHeadDotProductAttention_1/value/kernel"])
            self.layers[j].attent_layer.value.bias = reform(
                w[trans_block + "/MultiHeadDotProductAttention_1/value/bias"])

            self.layers[j].attent_layer.unifyheads.weight = reform(
                w[trans_block + "/MultiHeadDotProductAttention_1/out/kernel"])
            self.layers[j].attent_layer.unifyheads.bias = reform(
                w[trans_block + "/MultiHeadDotProductAttention_1/out/bias"])

            self.layers[j].norm1.weight = reform(w[trans_block + "/LayerNorm_0/scale"])
            self.layers[j].norm1.bias = reform(w[trans_block + "/LayerNorm_0/bias"])

            # MLP kernels are transposed with .T so they fit nn.Linear's
            # (out, in) weight layout.
            self.layers[j].percep_layer.linear1.weight = change_totens(w[trans_block + "/MlpBlock_3/Dense_0/kernel"].T)
            self.layers[j].percep_layer.linear1.bias = change_totens(w[trans_block + "/MlpBlock_3/Dense_0/bias"])

            self.layers[j].percep_layer.linear2.weight = change_totens(w[trans_block + "/MlpBlock_3/Dense_1/kernel"].T)
            self.layers[j].percep_layer.linear2.bias = change_totens(w[trans_block + "/MlpBlock_3/Dense_1/bias"])

            self.layers[j].norm2.weight = reform(w[trans_block + "/LayerNorm_2/scale"])
            self.layers[j].norm2.bias = reform(w[trans_block + "/LayerNorm_2/bias"])

    def forward(self, x):
        x = self.embed_patch(x)
        x = x.flatten(2)  # flatten the spatial grid into one patch dimension
        x = x.transpose(-1, -2)  # (B, hidden, patches) -> (B, patches, hidden)
        x = x + self.embed_pos  # add the learned positional embedding

        for layer in self.layers:  # stacked MSA/MLP residual blocks
            x = layer(x)

        return self.norm(x)  # final layer normalization


"""
This is the decoder with the Unet structure. Not that the paper is not clear 
about the size of the resnet skip connection. The given skip connections are 
printed by the cell above, but but not that these layers do not half in number
of channels, but half and a factor of 4. This is inconsistent and should be 
was with an TA
"""


def double_conv(in_channels, out_channels):
    """Two stacked 3x3 same-padding convolutions, each followed by ReLU."""
    conv_a = nn.Conv2d(in_channels, out_channels, 3, padding=1)
    conv_b = nn.Conv2d(out_channels, out_channels, 3, padding=1)
    return nn.Sequential(conv_a, nn.ReLU(inplace=True), conv_b, nn.ReLU(inplace=True))


class Decod(nn.Module):
    """U-Net style decoder (the upward path of TransUNet).

    Repeatedly upsamples the transformer features, concatenates the
    matching ResNet skip connection, and refines with double convolutions.
    A final 3x3 convolution plus sigmoid produces the per-pixel output.
    """

    def __init__(self, n_class, hidden_size):
        super().__init__()
        N = 16

        self.maxpool = nn.MaxPool2d(2)
        self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)

        self.dconv5 = double_conv(hidden_size, 512)
        self.dconv_up4 = double_conv(32 * N + 32 * N, 16 * N)
        self.dconv_up3 = double_conv(16 * N + 16 * N, 8 * N)
        self.dconv_up2 = double_conv(4 * N + 8 * N, 4 * N)
        self.dconv_up1 = double_conv(4 * N, N)

        self.conv_last = nn.Conv2d(N, n_class, 3, stride=1, padding=3 // 2)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, skip1, skip2, skip3):
        """Decode transformer features into a per-pixel prediction.

        Args:
            x: reshaped transformer output, (B, hidden_size, H, W).
            skip1, skip2, skip3: ResNet skip features, shallow to deep.
        """
        features = self.dconv5(x)

        # Walk back up the pyramid, fusing one skip connection per step.
        features = torch.cat([self.upsample(features), skip3], dim=1)
        features = self.dconv_up4(features)

        features = torch.cat([self.upsample(features), skip2], dim=1)
        features = self.dconv_up3(features)

        features = torch.cat([self.upsample(features), skip1], dim=1)
        features = self.dconv_up2(features)

        # Last upsampling step has no skip connection.
        features = self.dconv_up1(self.upsample(features))

        return self.sigmoid(self.conv_last(features))


"""
This class ties everything together. It contains the upward path 
the yellow transformer, and the green block of mutliple transformer layers as 
depicted in figure 1 of the paper together with the CNN
"""


class TransUnet(nn.Module):
    """Full TransUNet: ResNet encoder -> ViT transformer -> U-Net decoder
    (figure 1 of the TransUNet paper).

    Args:
        in_channels: number of channels of the input image.
        R_layers: a ResNet-50 module whose children are sliced by Resnet.
        hidden_size: transformer hidden dimension.
        img_size: (square) input image size.
    """

    def __init__(self, in_channels, R_layers, hidden_size=768, img_size=384):
        super(TransUnet, self).__init__()
        num_classes = 1  # 9 for the 8-class + background task

        self.CNN = Resnet(in_channels, list(R_layers.children()))
        self.tr = Transformer(hidden_size, img_size)

        self.cup = Decod(num_classes, hidden_size)

    def forward(self, x):
        skip1, skip2, skip3, x = self.CNN(x)

        x = self.tr(x)

        # Reshape the transformer tokens back into a 2-D feature map.
        # BUG FIX: the patch axis must be moved behind the channel axis
        # before reshaping; directly reshaping (B, n_patch, hidden) into
        # (B, hidden, h, w) would interleave patch and channel values.
        batch_size, n_patch, hid_size = x.shape
        side = int(math.sqrt(n_patch))
        x = x.permute(0, 2, 1).contiguous().view(batch_size, hid_size, side, side)

        predicted = self.cup(x, skip1, skip2, skip3)

        return predicted


class TriTransUnet(nn.Module):
    """Three cascaded TransUnet stages.

    Stage 1 sees the raw input; stages 2 and 3 each see a 2-channel input
    formed by concatenating the image with the previous stage's output.
    All three stage predictions are returned.
    """

    def __init__(self, R_layers):
        super(TriTransUnet, self).__init__()
        self.TransUnet1 = TransUnet(1, R_layers)
        self.TransUnet2 = TransUnet(2, R_layers)
        self.TransUnet3 = TransUnet(2, R_layers)

    def forward(self, x):
        out1 = self.TransUnet1(x)
        # Stage 2 input: image first, then the stage-1 prediction.
        out2 = self.TransUnet2(torch.cat((x, out1), dim=1))
        # Stage 3 input: stage-2 prediction first, then the image
        # (note the order differs from stage 2 — kept as in the original).
        out3 = self.TransUnet3(torch.cat((out2, x), dim=1))
        return out1, out2, out3
#
# if __name__ == '__main__':
#     image = torch.Tensor(2,1,384,384)
#     out = TriTransUnet(image)
#     print(out.shape)
