import math
import copy

import torch
import torch.nn as nn
from torch.nn import MaxPool3d, ConvTranspose3d, Dropout, Softmax, Linear, Conv3d, LayerNorm, GroupNorm, ReLU
from torch.nn import Module, Sequential, Sigmoid, Dropout3d,PReLU
from nets import configs

class Conv3D_Block(Module):
    """Two stacked conv -> GroupNorm(32) -> PReLU stages with an optional
    1x1x1-projected residual connection from input to output.

    With the default kernel=3/stride=1/padding=1 the spatial size is preserved.
    """
    def __init__(self, inp_feat, out_feat, kernel=3, stride=1, padding=1, residual=None):
        super(Conv3D_Block, self).__init__()

        def _stage(in_ch):
            # one conv stage; out_feat must be divisible by 32 for GroupNorm
            return Sequential(
                Conv3d(in_ch, out_feat, kernel_size=kernel, stride=stride,
                       padding=padding, bias=True),
                GroupNorm(32, out_feat),
                PReLU(out_feat),
            )

        self.conv1 = _stage(inp_feat)
        self.conv2 = _stage(out_feat)

        self.residual = residual
        if self.residual is not None:
            # channel-matching projection for the skip path
            self.residual_upsampler = Conv3d(inp_feat, out_feat, kernel_size=1, bias=False)

    def forward(self, x):
        """(B, inp_feat, D, H, W) -> (B, out_feat, D, H, W) with defaults."""
        out = self.conv2(self.conv1(x))
        if self.residual is not None:
            out = out + self.residual_upsampler(x)
        return out

class Deconv3D_Block(Module):
    """Transposed-conv upsampling block followed by PReLU.

    With the default kernel=3/stride=2/padding=1 (and output_padding=1) every
    spatial dimension is exactly doubled.
    """
    def __init__(self, inp_feat, out_feat, kernel=3, stride=2, padding=1):
        super(Deconv3D_Block, self).__init__()
        self.deconv = Sequential(
            ConvTranspose3d(
                inp_feat,
                out_feat,
                kernel_size=(kernel,) * 3,
                stride=(stride,) * 3,
                padding=(padding,) * 3,
                output_padding=1,
                bias=True,
            ),
            PReLU(out_feat),
        )

    def forward(self, x):
        """(B, inp_feat, D, H, W) -> (B, out_feat, 2D, 2H, 2W) with defaults."""
        return self.deconv(x)

class Attention(nn.Module):
    """Multi-head self-attention (ViT style).

    Splits hidden_size into config.trans["num_heads"] heads, applies scaled
    dot-product attention, merges the heads and projects back to hidden_size.
    Assumes hidden_size is divisible by the number of heads.
    """
    def __init__(self, config):
        super(Attention, self).__init__()
        self.num_attention_heads = config.trans["num_heads"]
        # per-head width and the total width across all heads
        self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # q/k/v projections (hidden -> heads * head_size)
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)

        # output projection back to the model width
        self.out = Linear(config.hidden_size, config.hidden_size)
        self.attn_dropout = Dropout(config.trans["att_dropout_rate"])
        self.proj_dropout = Dropout(config.trans["att_dropout_rate"])

        self.softmax = Softmax(dim=-1)

    def transpose_for_scores(self, x):
        """Reshape (B, N, hidden) -> (B, heads, N, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states):
        """Self-attend over hidden_states (B, N, hidden); returns the same shape."""
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))

        # scaled dot-product attention weights
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        probs = self.attn_dropout(self.softmax(scores))

        # weighted sum of values, heads folded back into the hidden dim
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        context = context.view(*(context.size()[:-2] + (self.all_head_size,)))
        return self.proj_dropout(self.out(context))

class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    Expands hidden_size to config.trans["mlp_dim"] and projects back.
    """
    def __init__(self, config):
        super(Mlp, self).__init__()
        self.fc1 = Linear(config.hidden_size, config.trans["mlp_dim"])  # expand
        self.fc2 = Linear(config.trans["mlp_dim"], config.hidden_size)  # project back
        self.act_fn = nn.functional.gelu
        self.dropout = Dropout(config.trans["dropout_rate"])

        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights, near-zero biases (order kept: weights first,
        # then biases, so seeded initialization stays reproducible)
        nn.init.xavier_uniform_(self.fc1.weight)
        nn.init.xavier_uniform_(self.fc2.weight)
        nn.init.normal_(self.fc1.bias, std=1e-6)
        nn.init.normal_(self.fc2.bias, std=1e-6)

    def forward(self, x):
        """(..., hidden) -> (..., hidden)."""
        expanded = self.dropout(self.act_fn(self.fc1(x)))
        return self.dropout(self.fc2(expanded))

class Embeddings(nn.Module):
    """Construct token embeddings from a 3D feature volume: stride-2 patch
    convolution plus learned absolute position embeddings, then dropout.

    NOTE(review): n_patches is derived from config.patches, but the patch
    convolution is hard-coded to kernel/stride 2 — the two must agree with the
    feature-map size reaching this module or the position-embedding addition
    will fail; confirm against the config actually used.
    """
    def __init__(self, config, img_size, inp_feat):
        super(Embeddings, self).__init__()
        hidden_size = config.hidden_size
        patch_size = config.patches
        # number of tokens the volume is split into
        n_patches = ((img_size[0] // patch_size[0])
                     * (img_size[1] // patch_size[1])
                     * (img_size[2] // patch_size[2]))
        self.patch_embeddings = Conv3d(inp_feat, hidden_size, kernel_size=2, stride=2, bias=True)
        # learned position embedding, zero-initialized
        self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, hidden_size))
        self.dropout = Dropout(config.trans["dropout_rate"])

    def forward(self, x):
        """(B, C, D, H, W) volume -> (B, n_patches, hidden) token sequence."""
        patches = self.patch_embeddings(x)                 # (B, hidden, D/2, H/2, W/2)
        tokens = patches.flatten(2, 4).transpose(-1, -2)   # (B, n_patches, hidden)
        return self.dropout(tokens + self.position_embeddings)

class Block(nn.Module):
    """Pre-norm transformer encoder layer:
    LayerNorm -> self-attention -> residual, then LayerNorm -> MLP -> residual.
    """
    def __init__(self, config):
        super(Block, self).__init__()
        hidden_size = config.hidden_size
        self.attention_norm = LayerNorm(hidden_size, eps=1e-6)
        self.ffn_norm = LayerNorm(hidden_size, eps=1e-6)
        self.ffn = Mlp(config)
        self.attn = Attention(config)

    def forward(self, x):
        """(B, N, hidden) -> (B, N, hidden)."""
        # attention sub-layer with residual connection
        x = x + self.attn(self.attention_norm(x))
        # feed-forward sub-layer with residual connection
        x = x + self.ffn(self.ffn_norm(x))
        return x

class Encoder(nn.Module):
    """Stack of config.trans["num_layers"] transformer Blocks followed by a
    final LayerNorm over the hidden dimension.
    """
    def __init__(self, config):
        super(Encoder, self).__init__()
        self.layer = nn.ModuleList()
        self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
        for _ in range(config.trans["num_layers"]):
            # Each Block is constructed fresh per iteration, so the
            # copy.deepcopy from the ViT reference code was redundant here.
            self.layer.append(Block(config))

    def forward(self, hidden_states):
        """(B, n_patches, hidden) -> (B, n_patches, hidden), layer-normalized."""
        for layer_block in self.layer:
            hidden_states = layer_block(hidden_states)
        return self.encoder_norm(hidden_states)

class Transformer(nn.Module):
    """Transformer bottleneck: embed a 3D feature volume into tokens, run the
    transformer encoder, then fold the tokens back into a 3D feature map and
    reduce channels with a conv -> GroupNorm -> PReLU head.

    NOTE(review): the output grid is assumed to be img_size // 16 per axis
    (three 2x poolings upstream plus the stride-2 patch embedding) — confirm
    if the surrounding architecture changes.
    """
    def __init__(self, config, img_size, inp_feat, out_feat):
        super(Transformer, self).__init__()
        self.img_size = img_size
        self.embeddings = Embeddings(config, img_size, inp_feat)
        self.encoder = Encoder(config)
        # map hidden_size channels down to the decoder width
        self.conv = Sequential(
            Conv3d(config.hidden_size, out_feat, kernel_size=3, stride=1, padding=1, bias=True),
            GroupNorm(32, out_feat),
            PReLU(out_feat),
        )

    def forward(self, input_ids):
        """(B, inp_feat, D, H, W) feature volume -> (B, out_feat, D/2, H/2, W/2)."""
        tokens = self.encoder(self.embeddings(input_ids))  # (B, n_patch, hidden)
        batch, _, hidden = tokens.size()
        depth = self.img_size[0] // 16
        height = self.img_size[1] // 16
        width = self.img_size[2] // 16
        # (B, n_patch, hidden) -> (B, hidden, d, h, w)
        volume = tokens.permute(0, 2, 1).contiguous().view(batch, hidden, depth, height, width)
        return self.conv(volume)

class Trans_UNet(nn.Module):
    """3D U-Net segmentation network with a transformer bottleneck.

    Four conv encoder stages (three followed by 2x max-pooling), a transformer
    block in place of the deepest conv stage, and a symmetric decoder with
    skip connections. Output is a per-voxel sigmoid map with num_channels
    channels at the input resolution.
    """
    def __init__(self, img_size=(96, 128, 128), img_channels=1, num_channels=5, feat_channels=None, residual=None):
        super(Trans_UNet, self).__init__()
        # Avoid a shared mutable default argument; behavior is unchanged.
        if feat_channels is None:
            feat_channels = [32, 64, 128, 256, 512]

        # Encoder downsamplers
        self.pool1 = MaxPool3d((2, 2, 2))
        self.pool2 = MaxPool3d((2, 2, 2))
        self.pool3 = MaxPool3d((2, 2, 2))

        # Encoder convolutions
        self.conv_blk1 = Conv3D_Block(img_channels, feat_channels[0], residual=residual)
        self.conv_blk2 = Conv3D_Block(feat_channels[0], feat_channels[1], residual=residual)
        self.conv_blk3 = Conv3D_Block(feat_channels[1], feat_channels[2], residual=residual)
        self.conv_blk4 = Conv3D_Block(feat_channels[2], feat_channels[3], residual=residual)

        # Transformer bottleneck in place of the deepest conv stage
        self.trans_blk = Transformer(configs.get_b16_config(), img_size, feat_channels[3], feat_channels[4])

        # Decoder convolutions (input channels doubled by skip concatenation)
        self.dec_conv_blk4 = Conv3D_Block(2 * feat_channels[3], feat_channels[3], residual=residual)
        self.dec_conv_blk3 = Conv3D_Block(2 * feat_channels[2], feat_channels[2], residual=residual)
        self.dec_conv_blk2 = Conv3D_Block(2 * feat_channels[1], feat_channels[1], residual=residual)
        self.dec_conv_blk1 = Conv3D_Block(2 * feat_channels[0], feat_channels[0], residual=residual)

        # Decoder upsamplers
        self.deconv_blk4 = Deconv3D_Block(feat_channels[4], feat_channels[3])
        self.deconv_blk3 = Deconv3D_Block(feat_channels[3], feat_channels[2])
        self.deconv_blk2 = Deconv3D_Block(feat_channels[2], feat_channels[1])
        self.deconv_blk1 = Deconv3D_Block(feat_channels[1], feat_channels[0])

        # BUGFIX: register the decoder dropouts as submodules. The original
        # built Dropout3d(p=0.5) fresh inside forward(), so model.eval() never
        # reached those modules and dropout kept firing at inference time.
        self.drop3 = Dropout3d(p=0.5)
        self.drop2 = Dropout3d(p=0.5)

        # Final 1*1 Conv Segmentation map
        self.one_conv = Conv3d(feat_channels[0], num_channels, kernel_size=1, stride=1, padding=0, bias=True)

        # Activation function
        self.sigmoid = Sigmoid()

    def forward(self, x):
        """(B, img_channels, D, H, W) -> (B, num_channels, D, H, W) sigmoid map."""
        # Encoder part: keep x1..x4 as skip features
        x1 = self.conv_blk1(x)
        x2 = self.conv_blk2(self.pool1(x1))
        x3 = self.conv_blk3(self.pool2(x2))
        x4 = self.conv_blk4(self.pool3(x3))

        # Transformer bottleneck
        base = self.trans_blk(x4)

        # Decoder part: upsample, concatenate the skip feature, convolve
        d4 = torch.cat([self.deconv_blk4(base), x4], dim=1)
        d_high4 = self.dec_conv_blk4(d4)

        d3 = torch.cat([self.deconv_blk3(d_high4), x3], dim=1)
        d_high3 = self.drop3(self.dec_conv_blk3(d3))

        d2 = torch.cat([self.deconv_blk2(d_high3), x2], dim=1)
        d_high2 = self.drop2(self.dec_conv_blk2(d2))

        d1 = torch.cat([self.deconv_blk1(d_high2), x1], dim=1)
        d_high1 = self.dec_conv_blk1(d1)

        return self.sigmoid(self.one_conv(d_high1))