

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.networks.nets.swin_unetr import SwinTransformer, SwinUNETR
from .utils import get_region_nums, get_mask_labels, mask_func, get_random_patch

class SimMIMHead(nn.Module):
    """Masked-image-modeling (SimMIM-style) pretraining head for 3D volumes.

    Randomly masks patches of the input volume, encodes the masked volume
    with a Swin transformer, and decodes the deepest feature map back to
    voxel resolution so a reconstruction loss can be applied on the masked
    regions.
    """

    def __init__(
            self,
            model_name,
            image_size,
            downsample_scale,
            mask_scale,
            in_channels: int = 1,
            bottom_encode_size=128,
            mask_ratio=0.5,
            learned_mask_tokens=True,

    ):
        """Build the encoder, mask token and reconstruction decoder.

        Args:
            model_name: encoder identifier; only "swintransformer" is supported.
            image_size: (H, W, D) size of the input volume.
            downsample_scale: per-axis total downsampling of the encoder's
                deepest feature map relative to the input.
            mask_scale: per-axis edge length of one mask patch, in voxels.
            in_channels: number of input channels.
            bottom_encode_size: channel width of the deepest encoder feature;
                must be divisible by 16 (the decoder repeatedly halves it).
            mask_ratio: fraction of patches to mask.
            learned_mask_tokens: if True the mask token is a trainable
                parameter, otherwise a fixed zero buffer.

        Raises:
            ValueError: if ``model_name`` is not a supported encoder.
        """
        super().__init__()

        self.in_channels = in_channels
        self.mask_ratio = mask_ratio
        self.image_size = image_size
        self.downsample_scale = downsample_scale
        self.mask_scale = mask_scale
        self.learned_mask_tokens = learned_mask_tokens

        if model_name == "swintransformer":
            self.model = self._build_encoder(in_channels)
        else:
            # Fail fast: the original silently skipped unknown names and later
            # crashed in forward() with an opaque AttributeError on self.model.
            raise ValueError(f"unsupported model_name: {model_name!r}")

        # Spatial size of the patch-level mask grid.
        self.mask_feat_size = [image_size[0] // mask_scale[0],
                               image_size[1] // mask_scale[1],
                               image_size[2] // mask_scale[2]]

        # One token covers a full mask patch worth of voxels.
        # NOTE(review): mask_tokens is not referenced in forward() here —
        # presumably consumed by mask_func or an external loss; verify.
        token_dim = in_channels * mask_scale[0] * mask_scale[1] * mask_scale[2]
        if learned_mask_tokens:
            self.mask_tokens = nn.Parameter(torch.zeros(1, 1, token_dim))
        else:
            # Register as a buffer: the original kept a bare tensor, which
            # .to(device) and state_dict() would not track (device-mismatch bug).
            self.register_buffer("mask_tokens", torch.zeros(1, 1, token_dim))

        # Spatial size of the deepest encoder feature map.
        bottom_size = [image_size[0] // downsample_scale[0],
                       image_size[1] // downsample_scale[1],
                       image_size[2] // downsample_scale[2]]

        # How many mask-scale patches fit in one deepest-feature region.
        # Grouping intentionally matches the original left-to-right evaluation.
        bottom_patch_size = downsample_scale
        cur_patch_size = mask_scale
        self.patches_of_region = bottom_patch_size[0] // cur_patch_size[0] * bottom_patch_size[1] // cur_patch_size[1] * bottom_patch_size[2] // cur_patch_size[2]
        print(f"patches_of_region is {self.patches_of_region}")
        print(f"deep feature size is {bottom_size}")

        self.conv = self._build_decoder(bottom_encode_size)

    @staticmethod
    def _build_encoder(in_channels):
        """3D Swin transformer encoder (MONAI) with the pretraining defaults."""
        return SwinTransformer(
            in_chans=in_channels,
            embed_dim=48,
            window_size=(7, 7, 7),
            patch_size=(2, 2, 2),
            depths=[2, 2, 2, 2],
            num_heads=[3, 6, 12, 24],
            mlp_ratio=4.0,
            qkv_bias=True,
            drop_rate=0.,
            attn_drop_rate=0.,
            drop_path_rate=0.,
            norm_layer=torch.nn.LayerNorm,
            use_checkpoint=False,
            spatial_dims=3,
        )

    @staticmethod
    def _build_decoder(dim):
        """Decoder mapping (B, dim, h, w, d) deep features to (B, 4, 32h, 32w, 32d).

        Five conv + InstanceNorm + LeakyReLU + 2x trilinear-upsample stages,
        followed by a 1x1x1 projection to 4 output channels.
        """
        def _stage(cin, cout):
            # One conv/norm/act/upsample unit of the decoder.
            return [
                nn.Conv3d(cin, cout, kernel_size=3, stride=1, padding=1),
                nn.InstanceNorm3d(cout),
                nn.LeakyReLU(),
                nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            ]

        layers = (_stage(dim, dim // 2)
                  + _stage(dim // 2, dim // 4)
                  + _stage(dim // 4, dim // 8)
                  + _stage(dim // 8, dim // 16)
                  + _stage(dim // 16, dim // 16)
                  + [nn.Conv3d(dim // 16, 4, kernel_size=1, stride=1)])
        return nn.Sequential(*layers)

    def forward(self, x):
        """Mask, encode and reconstruct a batch of volumes.

        Args:
            x: input batch; assumed (B, in_channels, H, W, D) — TODO confirm
               against mask_func's expectations.

        Returns:
            dict with the reconstruction ("pred"), the patch mask ("mask"),
            the masked input ("x_mask") and the masking geometry
            ("patch_size", "mask_feat_size").
        """
        patch_size = self.mask_scale
        mask_feat_size = self.mask_feat_size
        x_mask, mask = mask_func(x, self.in_channels, self.mask_ratio, patch_size, mask_feat_size)

        # Deepest (last) feature map of the Swin encoder.
        encode_feature = self.model(x_mask)[-1]

        # Decode back to voxel space. NOTE(review): the decoder emits 4
        # channels regardless of in_channels — confirm this is intended.
        x_rec = self.conv(encode_feature)

        return {
            "pred": x_rec,
            "mask": mask,
            "x_mask": x_mask,
            "patch_size": patch_size,
            "mask_feat_size": mask_feat_size,
        }
