from __future__ import annotations

from typing import Sequence

import numpy as np
import torch
from monai.networks.blocks import UnetrUpBlock, UnetrBasicBlock
from monai.networks.blocks.squeeze_and_excitation import ChannelSELayer
from monai.networks.nets import SwinUNETR


class SSwinUNETRBase(SwinUNETR):
    """SwinUNETR backbone extended with per-scale fusion blocks and channel
    squeeze-and-excitation (SE) blocks.

    Each ``fusionN`` block takes a backbone feature map concatenated with an
    8-channel auxiliary map (done by subclasses in ``forward``) and projects
    it back to the backbone's channel count at that scale.

    Args:
        img_size: spatial size of the input volume (passed to SwinUNETR).
        in_channels: number of input image channels.
        out_channels: number of output segmentation channels.
        spatial_dims: 2 or 3 spatial dimensions.
        feature_size: base feature width of the Swin encoder.
        drop_rate: dropout rate.
        attn_drop_rate: attention dropout rate.
        dropout_path_rate: stochastic-depth rate.
        norm_name: normalization layer name for the conv blocks.
        use_checkpoint: enable activation checkpointing in the Swin encoder.
        num_of_se_block: either an int ``k`` — attach SE blocks to the first
            ``k`` of the 5 decoder scales (deepest first) — or a sequence of
            5 booleans selecting scales explicitly.
    """

    def __init__(self,
                 img_size,
                 in_channels,
                 out_channels,
                 spatial_dims: int = 3,
                 feature_size: int = 24,
                 drop_rate: float = 0.0,
                 attn_drop_rate: float = 0.0,
                 dropout_path_rate: float = 0.0,
                 norm_name: str = 'instance',
                 use_checkpoint: bool = False,
                 num_of_se_block: int | Sequence[bool] = 5):
        super().__init__(img_size=img_size,
                         in_channels=in_channels,
                         out_channels=out_channels,
                         spatial_dims=spatial_dims,
                         feature_size=feature_size,
                         drop_rate=drop_rate,
                         attn_drop_rate=attn_drop_rate,
                         dropout_path_rate=dropout_path_rate,
                         norm_name=norm_name,
                         use_checkpoint=use_checkpoint)

        def _fusion(in_ch: int, out_ch: int) -> UnetrBasicBlock:
            # Conv block that fuses backbone features with the auxiliary map.
            return UnetrBasicBlock(
                spatial_dims=spatial_dims,
                in_channels=in_ch,
                out_channels=out_ch,
                stride=1,
                kernel_size=3,
                norm_name=norm_name,
                res_block=False,
            )

        aux_ch = 8  # channel count of the auxiliary inputs concatenated in forward()
        self.fusion5 = _fusion(16 * feature_size + aux_ch, 16 * feature_size)
        self.fusion4 = _fusion(8 * feature_size + aux_ch, 8 * feature_size)
        self.fusion3 = _fusion(4 * feature_size + aux_ch, 4 * feature_size)
        self.fusion2 = _fusion(2 * feature_size + aux_ch, 2 * feature_size)
        self.fusion1 = _fusion(feature_size + aux_ch, feature_size)

        # Channel widths of the 5 decoder scales, deepest first.
        se_channels = [16 * feature_size, 8 * feature_size, 4 * feature_size,
                       2 * feature_size, feature_size]
        if isinstance(num_of_se_block, int):
            # First k scales (deepest first) get SE blocks.
            active = [True] * num_of_se_block + [False] * (5 - num_of_se_block)
            add_residual = False
        else:
            active = list(num_of_se_block)
            # NOTE(review): the sequence branch historically used
            # add_residual=True while the int branch uses False — confirm
            # this asymmetry is intended.
            add_residual = True
        selected = [ch for ch, on in zip(se_channels, active) if on]
        # ModuleList so the SE layers are registered as submodules (a plain
        # list would hide their parameters from .parameters()/.to()).
        # BUGFIX: the sequence branch previously called range() on the
        # boolean sequence itself, raising TypeError at construction.
        self.se_blocks = torch.nn.ModuleList(
            ChannelSELayer(spatial_dims=spatial_dims, in_channels=ch,
                           add_residual=add_residual)
            for ch in selected
        )
        # Boolean mask (length 5) of which decoder scales carry an SE block.
        self.num_of_se_block = np.array(active, dtype=bool)


class SSwinUNETR5(SSwinUNETRBase):
    """Variant with SE blocks at all five decoder scales and auxiliary-input
    fusion along the encoder path.

    ``forward`` expects ``x_in`` to be a sequence where ``x_in[0]`` is the
    raw image volume and ``x_in[1..5]`` are 8-channel auxiliary maps matching
    the spatial size of each encoder scale (shallowest to deepest).
    """

    def __init__(self,
                 img_size,
                 in_channels,
                 out_channels,
                 spatial_dims: int = 3,
                 feature_size: int = 24,
                 drop_rate: float = 0.0,
                 attn_drop_rate: float = 0.0,
                 dropout_path_rate: float = 0.0,
                 norm_name: str = 'instance',
                 use_checkpoint: bool = False):
        super().__init__(img_size=img_size,
                         in_channels=in_channels,
                         out_channels=out_channels,
                         spatial_dims=spatial_dims,
                         feature_size=feature_size,
                         drop_rate=drop_rate,
                         attn_drop_rate=attn_drop_rate,
                         dropout_path_rate=dropout_path_rate,
                         norm_name=norm_name,
                         use_checkpoint=use_checkpoint,
                         num_of_se_block=5)
        # Expose SE blocks as individual attributes (also registers them).
        (self.se_block1, self.se_block2, self.se_block3,
         self.se_block4, self.se_block5) = self.se_blocks

    def forward(self, x_in):
        image = x_in[0]
        hidden = self.swinViT(image, self.normalize)
        enc0 = self.encoder1(image)
        # Fuse each encoder scale with its auxiliary map.
        enc1 = self.fusion1(torch.cat([self.encoder2(hidden[0]), x_in[1]], dim=1))
        enc2 = self.fusion2(torch.cat([self.encoder3(hidden[1]), x_in[2]], dim=1))
        enc3 = self.fusion3(torch.cat([self.encoder4(hidden[2]), x_in[3]], dim=1))
        skip4 = self.fusion4(torch.cat([hidden[3], x_in[4]], dim=1))
        bottleneck = self.fusion5(torch.cat([self.encoder10(hidden[4]), x_in[5]], dim=1))
        # Decode, applying one SE block per scale.
        x = self.se_block1(bottleneck)
        x = self.se_block2(self.decoder5(x, skip4))
        x = self.se_block3(self.decoder4(x, enc3))
        x = self.se_block4(self.decoder3(x, enc2))
        x = self.se_block5(self.decoder2(x, enc1))
        return self.out(self.decoder1(x, enc0))


class SSwinUNETR4(SSwinUNETRBase):
    """Variant with SE blocks at the four deepest decoder scales.

    ``forward`` takes a plain image tensor (no auxiliary inputs; the
    inherited fusion blocks are unused here).
    """

    def __init__(self,
                 img_size,
                 in_channels,
                 out_channels,
                 spatial_dims: int = 3,
                 feature_size: int = 24,
                 drop_rate: float = 0.0,
                 attn_drop_rate: float = 0.0,
                 dropout_path_rate: float = 0.0,
                 norm_name: str = 'instance',
                 use_checkpoint: bool = False):
        super().__init__(img_size=img_size,
                         in_channels=in_channels,
                         out_channels=out_channels,
                         spatial_dims=spatial_dims,
                         feature_size=feature_size,
                         drop_rate=drop_rate,
                         attn_drop_rate=attn_drop_rate,
                         dropout_path_rate=dropout_path_rate,
                         norm_name=norm_name,
                         use_checkpoint=use_checkpoint,
                         num_of_se_block=4)
        # Expose SE blocks as individual attributes (also registers them).
        (self.se_block1, self.se_block2,
         self.se_block3, self.se_block4) = self.se_blocks

    def forward(self, x_in):
        hidden = self.swinViT(x_in, self.normalize)
        enc0 = self.encoder1(x_in)
        enc1 = self.encoder2(hidden[0])
        enc2 = self.encoder3(hidden[1])
        enc3 = self.encoder4(hidden[2])
        # SE on the four deepest scales; the shallowest scale has none.
        x = self.se_block1(self.encoder10(hidden[4]))
        x = self.se_block2(self.decoder5(x, hidden[3]))
        x = self.se_block3(self.decoder4(x, enc3))
        x = self.se_block4(self.decoder3(x, enc2))
        x = self.decoder2(x, enc1)
        return self.out(self.decoder1(x, enc0))


class SSwinUNETR3(SSwinUNETRBase):
    """Variant with SE blocks at the three deepest decoder scales.

    ``forward`` takes a plain image tensor (no auxiliary inputs; the
    inherited fusion blocks are unused here).
    """

    def __init__(self,
                 img_size,
                 in_channels,
                 out_channels,
                 spatial_dims: int = 3,
                 feature_size: int = 24,
                 drop_rate: float = 0.0,
                 attn_drop_rate: float = 0.0,
                 dropout_path_rate: float = 0.0,
                 norm_name: str = 'instance',
                 use_checkpoint: bool = False):
        super().__init__(img_size=img_size,
                         in_channels=in_channels,
                         out_channels=out_channels,
                         spatial_dims=spatial_dims,
                         feature_size=feature_size,
                         drop_rate=drop_rate,
                         attn_drop_rate=attn_drop_rate,
                         dropout_path_rate=dropout_path_rate,
                         norm_name=norm_name,
                         use_checkpoint=use_checkpoint,
                         num_of_se_block=3)
        # Expose SE blocks as individual attributes (also registers them).
        self.se_block1, self.se_block2, self.se_block3 = self.se_blocks

    def forward(self, x_in):
        hidden = self.swinViT(x_in, self.normalize)
        enc0 = self.encoder1(x_in)
        enc1 = self.encoder2(hidden[0])
        enc2 = self.encoder3(hidden[1])
        enc3 = self.encoder4(hidden[2])
        # SE on the three deepest scales only.
        x = self.se_block1(self.encoder10(hidden[4]))
        x = self.se_block2(self.decoder5(x, hidden[3]))
        x = self.se_block3(self.decoder4(x, enc3))
        x = self.decoder3(x, enc2)
        x = self.decoder2(x, enc1)
        return self.out(self.decoder1(x, enc0))


class SSwinUNETR2(SSwinUNETRBase):
    """Variant with SE blocks at the two deepest decoder scales.

    ``forward`` takes a plain image tensor (no auxiliary inputs; the
    inherited fusion blocks are unused here).
    """

    def __init__(self,
                 img_size,
                 in_channels,
                 out_channels,
                 spatial_dims: int = 3,
                 feature_size: int = 24,
                 drop_rate: float = 0.0,
                 attn_drop_rate: float = 0.0,
                 dropout_path_rate: float = 0.0,
                 norm_name: str = 'instance',
                 use_checkpoint: bool = False):
        super().__init__(img_size=img_size,
                         in_channels=in_channels,
                         out_channels=out_channels,
                         spatial_dims=spatial_dims,
                         feature_size=feature_size,
                         drop_rate=drop_rate,
                         attn_drop_rate=attn_drop_rate,
                         dropout_path_rate=dropout_path_rate,
                         norm_name=norm_name,
                         use_checkpoint=use_checkpoint,
                         num_of_se_block=2)
        # Expose SE blocks as individual attributes (also registers them).
        self.se_block1, self.se_block2 = self.se_blocks

    def forward(self, x_in):
        hidden = self.swinViT(x_in, self.normalize)
        enc0 = self.encoder1(x_in)
        enc1 = self.encoder2(hidden[0])
        enc2 = self.encoder3(hidden[1])
        enc3 = self.encoder4(hidden[2])
        # SE on the two deepest scales only.
        x = self.se_block1(self.encoder10(hidden[4]))
        x = self.se_block2(self.decoder5(x, hidden[3]))
        x = self.decoder4(x, enc3)
        x = self.decoder3(x, enc2)
        x = self.decoder2(x, enc1)
        return self.out(self.decoder1(x, enc0))


class SSwinUNETR1(SSwinUNETRBase):
    """Variant with a single SE block at the deepest decoder scale.

    ``forward`` takes a plain image tensor (no auxiliary inputs; the
    inherited fusion blocks are unused here).
    """

    def __init__(self,
                 img_size,
                 in_channels,
                 out_channels,
                 spatial_dims: int = 3,
                 feature_size: int = 24,
                 drop_rate: float = 0.0,
                 attn_drop_rate: float = 0.0,
                 dropout_path_rate: float = 0.0,
                 norm_name: str = 'instance',
                 use_checkpoint: bool = False):
        super().__init__(img_size=img_size,
                         in_channels=in_channels,
                         out_channels=out_channels,
                         spatial_dims=spatial_dims,
                         feature_size=feature_size,
                         drop_rate=drop_rate,
                         attn_drop_rate=attn_drop_rate,
                         dropout_path_rate=dropout_path_rate,
                         norm_name=norm_name,
                         use_checkpoint=use_checkpoint,
                         num_of_se_block=1)
        # Expose the single SE block as an attribute (also registers it).
        self.se_block1 = self.se_blocks[0]

    def forward(self, x_in):
        hidden = self.swinViT(x_in, self.normalize)
        enc0 = self.encoder1(x_in)
        enc1 = self.encoder2(hidden[0])
        enc2 = self.encoder3(hidden[1])
        enc3 = self.encoder4(hidden[2])
        # SE on the bottleneck only.
        x = self.se_block1(self.encoder10(hidden[4]))
        x = self.decoder5(x, hidden[3])
        x = self.decoder4(x, enc3)
        x = self.decoder3(x, enc2)
        x = self.decoder2(x, enc1)
        return self.out(self.decoder1(x, enc0))


"""
# test pipeline
if __name__ == "__main__":
    import torch

    test_model = SSwinUNETR1(img_size=(96, 96, 96), in_channels=1, out_channels=4)
    test_model = test_model.cuda(3)
    test_input = torch.randn(1, 1, 96, 96, 96).cuda(3)
    test_trace = torch.jit.trace(test_model, test_input)
    test_trace.save("{0}.pt".format(type(test_model).__name__))
    test_out = test_model(test_input)
    print(test_model)
    print(test_out.shape)
"""
