#!/usr/bin/env python3


# ! <<< Self
import torch
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
from monai.networks.blocks.dynunet_block import UnetResBlock, get_conv_layer
# from monai.networks.nets.unetr import UNETR
# ! >>>

from typing import Sequence, Tuple, Union

import torch.nn as nn

from monai.networks.blocks.dynunet_block import UnetOutBlock, UnetBasicBlock
from monai.networks.blocks.unetr_block import UnetrBasicBlock, UnetrPrUpBlock, UnetrUpBlock
from monai.networks.nets.vit import ViT
from monai.utils import ensure_tuple_rep


class MyUnetrPrUpBlock(nn.Module):
    """
    A projection upsampling module that can be used for UNETR: "Hatamizadeh et al.,
    UNETR: Transformers for 3D Medical Image Segmentation <https://arxiv.org/abs/2103.10504>"
    """

    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        num_layer: int,
        kernel_size: Union[Sequence[int], int],
        stride: Union[Sequence[int], int],
        upsample_kernel_size: Union[Sequence[int], int],
        norm_name: Union[Tuple, str],
        conv_block: bool = False,
        res_block: bool = False,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions.
            in_channels: number of input channels.
            out_channels: number of output channels.
            num_layer: number of upsampling blocks.
            kernel_size: convolution kernel size.
            stride: convolution stride.
            upsample_kernel_size: convolution kernel size for transposed convolution layers.
            norm_name: feature normalization type and arguments.
            conv_block: bool argument to determine if convolutional block is used.
            res_block: bool argument to determine if residual block is used.

        """

        super().__init__()

        upsample_stride = upsample_kernel_size
        self.transp_conv_init = get_conv_layer(
            spatial_dims,
            in_channels,
            out_channels,
            kernel_size=upsample_kernel_size,
            stride=upsample_stride,
            conv_only=True,
            is_transposed=True,
        )
        if conv_block:
            if res_block:
                self.blocks = nn.ModuleList([nn.Sequential(
                            get_conv_layer(
                                spatial_dims,
                                out_channels,
                                out_channels,
                                kernel_size=upsample_kernel_size,
                                stride=upsample_stride,
                                conv_only=True,
                                is_transposed=True,
                            ),
                            UnetResBlock(
                                spatial_dims=spatial_dims,
                                in_channels=out_channels,
                                out_channels=out_channels,
                                kernel_size=kernel_size,
                                stride=stride,
                                norm_name=norm_name,
                            ),
                        ) for _ in range(num_layer)])
            else:
                self.blocks = nn.ModuleList([nn.Sequential(
                            get_conv_layer(
                                spatial_dims,
                                out_channels,
                                out_channels,
                                kernel_size=upsample_kernel_size,
                                stride=upsample_stride,
                                conv_only=True,
                                is_transposed=True,
                            ),
                            UnetBasicBlock(
                                spatial_dims=spatial_dims,
                                in_channels=out_channels,
                                out_channels=out_channels,
                                kernel_size=kernel_size,
                                stride=stride,
                                norm_name=norm_name,
                            ),
                        ) for _ in range(num_layer)])
        else:
            self.blocks = nn.ModuleList([get_conv_layer(
                        spatial_dims,
                        out_channels,
                        out_channels,
                        kernel_size=upsample_kernel_size,
                        stride=upsample_stride,
                        conv_only=True,
                        is_transposed=True,
                    ) for _ in range(num_layer)])
        # ! <<< Add before blocks
        self.blocks.insert(0,
            get_conv_layer(
                spatial_dims,
                out_channels + out_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                conv_only=True,
            ),
        )
        # ! >>>

    def forward(self, x1, x2):
        x1 = self.transp_conv_init(x1)
        x2 = self.transp_conv_init(x2)
        x = torch.cat((x1, x2), dim=1)
        for blk in self.blocks:
            x = blk(x)
        return x


class MyUnetrUpBlock(nn.Module):
    """
    An upsampling module that can be used for UNETR: "Hatamizadeh et al.,
    UNETR: Transformers for 3D Medical Image Segmentation <https://arxiv.org/abs/2103.10504>"

    Modified to fuse FOUR tensors: two decoder inputs (both upsampled by the
    same transposed convolution, i.e. shared weights) plus two skip
    connections, concatenated channel-wise and reduced by a conv block.
    """

    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[Sequence[int], int],
        upsample_kernel_size: Union[Sequence[int], int],
        norm_name: Union[Tuple, str],
        res_block: bool = False,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions.
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernel_size: convolution kernel size.
            upsample_kernel_size: convolution kernel size for transposed convolution layers.
            norm_name: feature normalization type and arguments.
            res_block: bool argument to determine if residual block is used.

        """

        super().__init__()

        # Stride matches the kernel so the transposed conv upsamples spatially
        # by exactly ``upsample_kernel_size``.
        self.transp_conv = get_conv_layer(
            spatial_dims,
            in_channels,
            out_channels,
            kernel_size=upsample_kernel_size,
            stride=upsample_kernel_size,
            conv_only=True,
            is_transposed=True,
        )

        # Residual or basic conv block; either way it fuses the 4-way channel
        # concat (two upsampled inputs + two skips = 4 * out_channels) back
        # down to ``out_channels``.
        block_cls = UnetResBlock if res_block else UnetBasicBlock
        self.conv_block = block_cls(
            spatial_dims,
            4 * out_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=1,
            norm_name=norm_name,
        )

    def forward(self, inp, inp2, skip1, skip2):
        """Upsample both decoder inputs, concatenate with the skips, and fuse.

        The skip tensors must already have ``out_channels`` channels and the
        post-upsampling spatial size.
        """
        # Both decoder inputs share ``self.transp_conv`` (shared weights).
        up1 = self.transp_conv(inp)
        up2 = self.transp_conv(inp2)
        fused = torch.cat((up1, up2, skip1, skip2), dim=1)
        return self.conv_block(fused)


class MyUNETR(nn.Module):
    """
    Two-stage (cascaded) UNETR based on: "Hatamizadeh et al.,
    UNETR: Transformers for 3D Medical Image Segmentation <https://arxiv.org/abs/2103.10504>"

    Stage 1 is a standard UNETR that produces a coarse segmentation ``out1``.
    Stage 2 runs a second ViT + decoder on ``cat(out1, x_in)`` and fuses the
    stage-1 feature maps with the stage-2 ones through the ``My*`` blocks.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        img_size: Union[Sequence[int], int],
        feature_size: int = 16,
        hidden_size: int = 768,
        mlp_dim: int = 3072,
        num_heads: int = 12,
        pos_embed: str = "perceptron",
        norm_name: Union[Tuple, str] = "instance",
        conv_block: bool = True,
        res_block: bool = True,
        dropout_rate: float = 0.0,
        spatial_dims: int = 3,
    ) -> None:
        """
        Args:
            in_channels: dimension of input channels.
            out_channels: dimension of output channels.
            img_size: dimension of input image.
            feature_size: dimension of network feature size.
            hidden_size: dimension of hidden layer.
            mlp_dim: dimension of feedforward layer.
            num_heads: number of attention heads.
            pos_embed: position embedding layer type.
            norm_name: feature normalization type and arguments.
            conv_block: bool argument to determine if convolutional block is used.
            res_block: bool argument to determine if residual block is used.
            dropout_rate: fraction of the input units to drop.
            spatial_dims: number of spatial dims.

        Examples::

            # for single channel input 4-channel output with image size of (96,96,96), feature size of 32 and batch norm
            >>> net = MyUNETR(in_channels=1, out_channels=4, img_size=(96,96,96), feature_size=32, norm_name='batch')

            # for single channel input 4-channel output with image size of (96,96), feature size of 32 and batch norm
            >>> net = MyUNETR(in_channels=1, out_channels=4, img_size=96, feature_size=32, norm_name='batch', spatial_dims=2)

            # for 4-channel input 3-channel output with image size of (128,128,128), conv position embedding and instance norm
            >>> net = MyUNETR(in_channels=4, out_channels=3, img_size=(128,128,128), pos_embed='conv', norm_name='instance')

        """

        super().__init__()

        if not (0 <= dropout_rate <= 1):
            raise ValueError("dropout_rate should be between 0 and 1.")

        if hidden_size % num_heads != 0:
            raise ValueError("hidden_size should be divisible by num_heads.")

        self.num_layers = 12
        img_size = ensure_tuple_rep(img_size, spatial_dims)  # e.g. 96 -> (96, 96, 96)
        self.patch_size = ensure_tuple_rep(16, spatial_dims)  # fixed 16^spatial_dims patches
        # Token grid size per spatial dim, e.g. (6, 6, 6) for 96^3 / 16^3.
        self.feat_size = tuple(img_d // p_d for img_d, p_d in zip(img_size, self.patch_size))
        self.hidden_size = hidden_size
        self.classification = False

        # ---- Stage 1: plain UNETR encoder/decoder ----
        self.vit1 = ViT(
            in_channels=in_channels,
            img_size=img_size,
            patch_size=self.patch_size,
            hidden_size=hidden_size,
            mlp_dim=mlp_dim,
            num_layers=self.num_layers,
            num_heads=num_heads,
            pos_embed=pos_embed,
            classification=self.classification,
            dropout_rate=dropout_rate,
            spatial_dims=spatial_dims,
        )
        self.encoder1_1 = UnetrBasicBlock(
            spatial_dims=spatial_dims,
            in_channels=in_channels,
            out_channels=feature_size,
            kernel_size=3,
            stride=1,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.encoder1_2 = UnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 2,
            num_layer=2,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder1_3 = UnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 4,
            num_layer=1,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder1_4 = UnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            num_layer=0,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.decoder1_5 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder1_4 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 8,
            out_channels=feature_size * 4,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder1_3 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 4,
            out_channels=feature_size * 2,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder1_2 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 2,
            out_channels=feature_size,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.out1 = UnetOutBlock(spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels)

        # ---- Stage 2: second ViT over cat(stage-1 logits, input), with
        # two-branch encoders and four-input decoders fusing both stages ----
        self.vit2 = ViT(
            # Stage-2 input is the stage-1 prediction concatenated with x_in.
            in_channels=out_channels + in_channels,
            img_size=img_size,
            patch_size=self.patch_size,
            hidden_size=hidden_size,
            mlp_dim=mlp_dim,
            num_layers=self.num_layers,
            num_heads=num_heads,
            pos_embed=pos_embed,
            classification=self.classification,
            dropout_rate=dropout_rate,
            spatial_dims=spatial_dims,
        )
        self.encoder2_1 = UnetrBasicBlock(
            spatial_dims=spatial_dims,
            in_channels=out_channels + in_channels,
            out_channels=feature_size,
            kernel_size=3,
            stride=1,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.encoder2_2 = MyUnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 2,
            num_layer=2,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder2_3 = MyUnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 4,
            num_layer=1,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder2_4 = MyUnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            num_layer=0,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.decoder2_5 = MyUnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder2_4 = MyUnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 8,
            out_channels=feature_size * 4,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder2_3 = MyUnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 4,
            out_channels=feature_size * 2,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder2_2 = MyUnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 2,
            out_channels=feature_size,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.out2 = UnetOutBlock(spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels)

    def proj_feat(self, x, hidden_size, feat_size):
        """Reshape ViT tokens (B, n_tokens, hidden) to a feature map (B, hidden, *feat_size).

        Must be applied exactly once per token tensor: calling it on an
        already-projected (B, hidden, *feat_size) tensor would still succeed
        (same number of elements) but silently scramble the data.
        """
        new_view = (x.size(0), *feat_size, hidden_size)
        x = x.view(new_view)
        # Move the channel (hidden) axis from last to second position.
        new_axes = (0, len(x.shape) - 1) + tuple(d + 1 for d in range(len(feat_size)))
        x = x.permute(new_axes).contiguous()
        return x

    def forward(self, x_in, training=False):
        """Run both stages.

        Args:
            x_in: input image batch, (B, in_channels, *img_size).
            training: when True, return ``(out1, out2)`` so a loss can be
                attached to the intermediate stage-1 prediction; otherwise
                return only the refined stage-2 logits ``out2``.
        """
        # ---- Stage 1 ----
        # x1: final ViT tokens; hidden_states_out1: the 12 per-layer token tensors.
        x1, hidden_states_out1 = self.vit1(x_in)
        enc1_1 = self.encoder1_1(x_in)
        # NOTE(review): layers 2/5/8 are tapped here (MONAI's reference UNETR
        # uses 3/6/9) — confirm the intended tap points.
        x1_2 = self.proj_feat(hidden_states_out1[2], self.hidden_size, self.feat_size)
        enc1_2 = self.encoder1_2(x1_2)
        x1_3 = self.proj_feat(hidden_states_out1[5], self.hidden_size, self.feat_size)
        enc1_3 = self.encoder1_3(x1_3)
        x1_4 = self.proj_feat(hidden_states_out1[8], self.hidden_size, self.feat_size)
        enc1_4 = self.encoder1_4(x1_4)

        dec1_4 = self.proj_feat(x1, self.hidden_size, self.feat_size)
        dec1_3 = self.decoder1_5(dec1_4, enc1_4)
        dec1_2 = self.decoder1_4(dec1_3, enc1_3)
        dec1_1 = self.decoder1_3(dec1_2, enc1_2)
        _out1 = self.decoder1_2(dec1_1, enc1_1)
        # Coarse stage-1 segmentation logits, (B, out_channels, *img_size).
        out1 = self.out1(_out1)

        # ---- Stage 2 ----
        x_in2 = torch.cat((out1, x_in), dim=1)
        x2, hidden_states_out2 = self.vit2(x_in2)
        enc2_1 = self.encoder2_1(x_in2)
        # BUGFIX: x1_2, x1_3 and x1_4 were previously pushed through
        # proj_feat a SECOND time here; since they are already in
        # (B, hidden, *feat_size) layout, the extra view+permute silently
        # scrambled the stage-1 features before fusion. They are now reused
        # as-is from stage 1.
        x2_2 = self.proj_feat(hidden_states_out2[2], self.hidden_size, self.feat_size)
        enc2_2 = self.encoder2_2(x1_2, x2_2)
        x2_3 = self.proj_feat(hidden_states_out2[5], self.hidden_size, self.feat_size)
        # NOTE(review): argument order (x2 first) differs from encoder2_2 and
        # encoder2_4 (x1 first). Left unchanged to preserve behavior with any
        # existing trained weights — confirm whether this asymmetry is intended.
        enc2_3 = self.encoder2_3(x2_3, x1_3)
        x2_4 = self.proj_feat(hidden_states_out2[8], self.hidden_size, self.feat_size)
        enc2_4 = self.encoder2_4(x1_4, x2_4)

        dec2_4 = self.proj_feat(x2, self.hidden_size, self.feat_size)
        # Each decoder fuses: stage-1 decoder feature, stage-2 decoder feature,
        # stage-2 skip, stage-1 skip.
        dec2_3 = self.decoder2_5(dec1_4, dec2_4, enc2_4, enc1_4)
        dec2_2 = self.decoder2_4(dec1_3, dec2_3, enc2_3, enc1_3)
        dec2_1 = self.decoder2_3(dec1_2, dec2_2, enc2_2, enc1_2)
        _out2 = self.decoder2_2(dec1_1, dec2_1, enc2_1, enc1_1)
        # Refined stage-2 segmentation logits, (B, out_channels, *img_size).
        out2 = self.out2(_out2)
        return (out1, out2) if training else out2
        # return out2


if __name__ == "__main__":
    # Smoke test: build the two-stage model and print its structure.
    model = MyUNETR(in_channels=1, out_channels=2, img_size=(96, 96, 96))
    # torch.randn gives a properly initialized dummy input; torch.Tensor(*sizes)
    # returns uninitialized memory and is discouraged by the PyTorch docs.
    x = torch.randn(1, 1, 96, 96, 96)
    # y = model(x)
    print(model)
    # print(model(x))