from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from monai.networks.blocks import UpSample
from monai.networks.nets.basic_unet import UpCat

from timm.models.layers import DropPath, to_3tuple, trunc_normal_
import torch.fft


class Mlp(nn.Module):
    """Position-wise feed-forward network: Linear -> activation -> Linear.

    Dropout (shared rate) is applied after the activation and after the
    second projection. Hidden/output widths default to ``in_features``.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # `or` keeps the original fallback semantics (None/0 -> in_features).
        hidden_dim = hidden_features or in_features
        out_dim = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_dim)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_dim, out_dim)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))


class GlobalFilter(nn.Module):
    """Learned frequency-domain filter over a flattened 3-D token grid.

    The (B, N, C) token sequence is reshaped to a (w, h, d) volume, taken to
    the frequency domain with a real FFT, multiplied element-wise by a
    learnable complex weight, and transformed back.

    Args:
        dim: channel count C of the incoming tokens.
        h, w, d: spatial extent of the token grid; N must equal w * h * d.
    """

    def __init__(self, dim, h, w, d):
        super().__init__()
        # Complex weight stored as a (..., 2) real tensor for view_as_complex;
        # the last spatial axis is d // 2 + 1 because rfftn keeps only the
        # non-negative frequencies along its final transformed dim.
        self.complex_weight = nn.Parameter(torch.randn(dim, h, w, d // 2 + 1, 2, dtype=torch.float32) * 0.02)
        self.w = w
        self.h = h
        self.d = d

    def forward(self, x):
        B, N, C = x.shape

        x = x.view(B, self.w, self.h, self.d, C)
        x = x.to(torch.float32)

        # Channels-first: (B, C, w, h, d).
        x = x.permute(0, -1, 1, 2, 3)
        # NOTE(review): dim=(1, 2, 3, 4) transforms along the channel axis as
        # well as the spatial axes; the standard GFNet filter acts on spatial
        # dims only (dim=(2, 3, 4)). Broadcasting against the (dim, h, w, .)
        # weight only works because h == w in every call site — confirm this
        # channel-axis FFT is intentional before reusing with h != w.
        x = torch.fft.rfftn(x, dim=(1, 2, 3, 4), norm='ortho')
        weight = torch.view_as_complex(self.complex_weight)
        x = x * weight
        # Pass the original sizes so the inverse transform restores the exact
        # input shape even for odd d (without `s`, the last axis would come
        # back as d - 1 and the final reshape below would fail). For even d
        # this is bit-identical to omitting `s`.
        x = torch.fft.irfftn(x, s=(C, self.w, self.h, self.d), dim=(1, 2, 3, 4), norm='ortho')
        x = x.permute(0, 2, 3, 4, 1)

        x = x.reshape(B, N, C)

        return x


class Block(nn.Module):
    """GFNet block: norm -> GlobalFilter -> norm -> MLP, all inside one
    residual connection with optional stochastic depth (DropPath)."""

    def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, h=8, w=8,
                 d=8):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.filter = GlobalFilter(dim, h=h, w=w, d=d)
        # Identity when drop_path is 0 so inference cost is unchanged.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        branch = self.filter(self.norm1(x))
        branch = self.mlp(self.norm2(branch))
        return x + self.drop_path(branch)


class BlockLayerScale(nn.Module):
    """GFNet block with LayerScale: the residual branch is multiplied by a
    learnable per-channel gamma (initialized to a small value) before being
    added back, which stabilizes training of deep stacks."""

    def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0., act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm, h=8, w=8, d=8, init_values=1e-5):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.filter = GlobalFilter(dim, h=h, w=w, d=d)
        # Identity when drop_path is 0 so inference cost is unchanged.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop)
        # Per-channel residual scaling, trainable.
        self.gamma = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)

    def forward(self, x):
        branch = self.filter(self.norm1(x))
        branch = self.gamma * self.mlp(self.norm2(branch))
        return x + self.drop_path(branch)


class PatchEmbed(nn.Module):
    """Volumetric patch embedding: a strided Conv3d cuts the image into
    non-overlapping patches and projects each to an ``embed_dim`` vector;
    the output is a flattened token sequence of shape (B, N, embed_dim)."""

    def __init__(self, img_size=128, patch_size=4, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_3tuple(img_size)
        patch_size = to_3tuple(patch_size)
        grid = tuple(s // p for s, p in zip(img_size, patch_size))
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = grid[0] * grid[1] * grid[2]

        # kernel == stride: each patch is projected independently.
        self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        # Tuple unpacking doubles as an implicit 5-D (B, C, H, W, D) check.
        B, C, H, W, D = x.shape
        return self.proj(x).flatten(2).transpose(1, 2)


class DownLayer(nn.Module):
    """Inter-stage downsampling: reshape the token sequence back into a cubic
    volume, halve each spatial dim with a stride-2 Conv3d, then flatten back
    to tokens with ``dim_out`` channels."""

    def __init__(self, img_size=32, dim_in=64, dim_out=128):
        super().__init__()
        self.img_size = img_size
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.proj = nn.Conv3d(dim_in, dim_out, kernel_size=2, stride=2)
        # Halving all three spatial dims divides the token count by 8.
        self.num_patches = img_size * img_size * img_size // 8

    def forward(self, x):
        B, N, C = x.size()
        side = self.img_size
        volume = x.view(B, side, side, side, C).permute(0, 4, 1, 2, 3)
        downsampled = self.proj(volume).permute(0, 2, 3, 4, 1)
        return downsampled.reshape(B, -1, self.dim_out)


class GFNetPyramid(nn.Module):
    """Four-stage 3-D GFNet pyramid encoder.

    Stage 0 embeds the input volume into tokens (PatchEmbed); stages 1-3 each
    start with a DownLayer that halves the spatial grid and doubles (per
    ``embed_dim``) the channel count. Every stage runs a stack of global-filter
    blocks. ``forward`` returns the four stage outputs as 5-D feature maps,
    suitable for a U-Net style decoder.

    NOTE(review): statement order here fixes the RNG consumption order of the
    parameter initializations and the state_dict key layout — do not reorder
    module creation without retraining/converting checkpoints.
    """

    def __init__(self, in_channel=1, img_size=128, patch_size=4, embed_dim=[64, 128, 256, 512], depth=[2, 2, 10, 4],
                 mlp_ratio=[4, 4, 4, 4], drop_rate=0., drop_path_rate=0., norm_layer=None,
                 init_values=0.001, no_layerscale=False, dropcls=0):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim[-1]  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.img_size = img_size
        self.patch_size = patch_size
        # patch_embed[0] is the PatchEmbed; [1:] are the three DownLayers.
        self.patch_embed = nn.ModuleList()

        patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_channel, embed_dim=embed_dim[0])
        self.num_patches = patch_embed.num_patches
        # Learned positional embedding, added only at stage 0.
        self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches, embed_dim[0]))
        self.patch_embed.append(patch_embed)

        # Per-stage cubic grid side lengths, written out for the reference
        # img_size of 128 and then rescaled to the actual img_size below.
        sizes = [128 // 4, 128 // 8, 128 // 16, 128 // 32]

        for i in range(4):
            sizes[i] = sizes[i] * img_size // 128

        for i in range(3):
            patch_embed = DownLayer(sizes[i], embed_dim[i], embed_dim[i + 1])
            num_patches = patch_embed.num_patches
            self.patch_embed.append(patch_embed)

        self.pos_drop = nn.Dropout(p=drop_rate)
        self.blocks = nn.ModuleList()

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))]  # stochastic depth decay rule
        cur = 0
        for i in range(4):
            # Filters assume a cubic token grid at every stage.
            h = w = d = sizes[i]

            if no_layerscale:
                print('using standard block')
                blk = nn.Sequential(*[
                    Block(
                        dim=embed_dim[i], mlp_ratio=mlp_ratio[i],
                        drop=drop_rate, drop_path=dpr[cur + j], norm_layer=norm_layer, h=h, w=w, d=d)
                    for j in range(depth[i])
                ])
            else:
                print('using layerscale block')
                blk = nn.Sequential(*[
                    BlockLayerScale(
                        dim=embed_dim[i], mlp_ratio=mlp_ratio[i],
                        drop=drop_rate, drop_path=dpr[cur + j], norm_layer=norm_layer, h=h, w=w, d=d,
                        init_values=init_values)
                    for j in range(depth[i])
                ])
            self.blocks.append(blk)
            cur += depth[i]

        if dropcls > 0:
            print('dropout %.2f before classifier' % dropcls)
            self.final_dropout = nn.Dropout(p=dropcls)
        else:
            # Identity so forward code need not branch on dropcls.
            self.final_dropout = nn.Identity()

        trunc_normal_(self.pos_embed, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights, zeros for biases,
        ones/zeros for LayerNorm. Applied recursively via self.apply."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            # Inner isinstance check is redundant (already in the Linear
            # branch) but kept as-is; it only guards the bias presence.
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """Return the four stage feature maps, each (B, C_i, h, w, d)."""
        features = []
        for i in range(4):
            x = self.patch_embed[i](x)
            if i == 0:
                x = x + self.pos_embed
            x = self.blocks[i](x)
            B, N, C = x.shape
            # Recover the cubic grid side from the token count; assumes the
            # stage grid is a perfect cube (rounding absorbs float error).
            h = w = d = round(int(N) ** (1 / 3))

            feature = rearrange(x, 'b (h w d) c -> b c h w d', h=h, w=w, d=d)
            features.append(feature)
        return features


class GFUnet(nn.Module):
    """U-Net-style segmentation network with a GFNetPyramid encoder.

    The four encoder feature maps are fused coarse-to-fine by MONAI ``UpCat``
    blocks (upsample + skip concatenation + conv), then two ``UpSample``
    deconvolutions restore the original resolution (the encoder's patch
    embedding downsampled by a factor of 4) and project to ``out_channels``
    logits.

    NOTE(review): attribute names and module creation order define the
    state_dict layout — keep them stable for checkpoint compatibility.
    """

    def __init__(self, spatial_dims=3, in_channel=1, out_channels=2, img_size=128, patch_size=4,
                 embed_dim=[64, 128, 256, 512], depth=[2, 2, 10, 4],
                 mlp_ratio=[4, 4, 4, 4], drop_rate=0., drop_path_rate=0., norm_layer=None,
                 init_values=0.001, no_layerscale=False, dropcls=0,
                 act=("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
                 norm=("instance", {"affine": True}), bias=True,
                 dropout=0.0, upsample="deconv"):
        super().__init__()
        self.backbone = GFNetPyramid(in_channel=in_channel, img_size=img_size, patch_size=patch_size,
                                     embed_dim=embed_dim, depth=depth,
                                     mlp_ratio=mlp_ratio, drop_rate=drop_rate, drop_path_rate=drop_path_rate,
                                     norm_layer=norm_layer,
                                     init_values=init_values, no_layerscale=no_layerscale, dropcls=dropcls)
        # Decoder: each UpCat upsamples the deeper feature and concatenates
        # the matching encoder skip (stage indices 3->2, 2->1, 1->0).
        self.upcat_3 = UpCat(spatial_dims, embed_dim[-1], embed_dim[-2], embed_dim[-2], act, norm, bias, dropout,
                             upsample)
        self.upcat_2 = UpCat(spatial_dims, embed_dim[-2], embed_dim[-3], embed_dim[-3], act, norm, bias, dropout,
                             upsample)
        self.upcat_1 = UpCat(spatial_dims, embed_dim[-3], embed_dim[-4], embed_dim[-4], act, norm, bias, dropout,
                             upsample)

        # Two 2x deconvolutions undo the patch_size=4 embedding stride and
        # map to the requested number of output channels.
        self.proj = nn.Sequential(
            UpSample(
                spatial_dims=3,
                in_channels=embed_dim[-4],
                out_channels=embed_dim[-4],
                scale_factor=2,
                mode="deconv"
            ),
            UpSample(
                spatial_dims=3,
                in_channels=embed_dim[-4],
                out_channels=out_channels,
                scale_factor=2,
                mode="deconv"
            )
        )

    def forward(self, x):
        """Return segmentation logits at the input's spatial resolution."""
        # x0..x3: encoder features from fine (x0) to coarse (x3).
        x0, x1, x2, x3 = self.backbone(x)
        u3 = self.upcat_3(x3, x2)
        u2 = self.upcat_2(u3, x1)
        u1 = self.upcat_1(u2, x0)
        logits = self.proj(u1)
        return logits
