import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.networks.nets.swin_unetr import SwinTransformer, SwinUNETR
from monai.networks.nets.vit import ViT
from .utils import get_region_nums, get_mask_labels, mask_func, get_random_patch_new, get_random_patch

class SwinEncoder(nn.Module):
    """Swin-Transformer encoder with self-supervised pretraining heads.

    Wraps MONAI's ``SwinTransformer`` and attaches, on top of its deepest
    feature map:

    * a small conv/upsample decoder reconstructing the masked input
      (``pred``),
    * a masked-region count head (``pred_mask_region``),
    * a per-token masked-position head (``pred_mask_position_region``),
    * a contrastive projection head (``contrast_pred_1`` / ``contrast_pred_2``).

    Args:
        image_size: spatial size ``(D, H, W)`` of the input volume.
        downsample_scale: per-axis total downsampling factor of the encoder;
            also used as the masking patch size.
        in_channels: number of input channels.
        bottom_encode_size: channel width of the deepest encoder feature
            (i.e. the input width of all prediction heads).
        learned_mask_tokens: if True, the mask token is a trainable
            ``nn.Parameter``; otherwise a fixed zero buffer.
    """

    def __init__(
            self,
            image_size,
            downsample_scale,
            in_channels: int = 1,
            bottom_encode_size: int = 128,
            learned_mask_tokens: bool = True,
    ):
        super().__init__()

        self.in_channels = in_channels
        self.image_size = image_size
        self.downsample_scale = downsample_scale
        self.learned_mask_tokens = learned_mask_tokens

        self.model = SwinTransformer(
            in_chans=in_channels,
            embed_dim=48,
            window_size=(7, 7, 7),
            patch_size=(2, 2, 2),
            depths=[2, 2, 2, 2],
            num_heads=[3, 6, 12, 24],
            mlp_ratio=4.0,
            qkv_bias=True,
            drop_rate=0.,
            attn_drop_rate=0.,
            drop_path_rate=0.,
            norm_layer=torch.nn.LayerNorm,
            use_checkpoint=False,
            spatial_dims=3,
        )

        # Spatial size of the mask grid (== deepest feature-map size).
        self.mask_feat_size = [s // d for s, d in zip(image_size, downsample_scale)]

        # One flattened token per masked patch: in_channels * prod(patch size).
        token_dim = in_channels * downsample_scale[0] * downsample_scale[1] * downsample_scale[2]
        if learned_mask_tokens:
            self.mask_tokens = nn.Parameter(torch.zeros(1, 1, token_dim))
        else:
            # register_buffer so the tensor follows .to(device)/.cuda();
            # persistent=False keeps state_dict identical to older checkpoints.
            self.register_buffer("mask_tokens", torch.zeros(1, 1, token_dim), persistent=False)

        bottom_size = list(self.mask_feat_size)
        cur_patch_size = downsample_scale

        # NOTE: evaluated left-to-right, i.e. (((D//p0)*H)//p1*W)//p2.  Equal to
        # the product of per-axis quotients only when each axis divides evenly;
        # kept verbatim to preserve behavior for non-divisible sizes.
        self.patches_of_region = image_size[0] // cur_patch_size[0] * image_size[1] // cur_patch_size[1] * image_size[2] // cur_patch_size[2]
        print(f"patches_of_region is {self.patches_of_region}")
        print(f"deep feature size is {bottom_size}")

        # Classification over possible masked-patch counts (0..N inclusive).
        self.pred_mask_region = nn.Linear(bottom_encode_size, self.patches_of_region + 1)
        # Binary masked/unmasked prediction per deep-feature token.
        self.pred_mask_region_position = nn.Linear(bottom_encode_size, 2)

        self.contrast_learning_head = nn.Linear(bottom_encode_size, 512)
        dim = bottom_encode_size

        # Lightweight decoder: 3 x (conv + 2x trilinear upsample) then a 1x1
        # projection back to in_channels.  Total upsampling factor is 8.
        self.conv = nn.Sequential(
            nn.Conv3d(dim, dim // 2, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 2),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            nn.Conv3d(dim // 2, dim // 4, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 4),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            nn.Conv3d(dim // 4, dim // 8, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 8),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            nn.Conv3d(dim // 8, dim // 16, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 16),
            nn.LeakyReLU(),
            nn.Conv3d(dim // 16, in_channels, kernel_size=1, stride=1, padding=0),
        )

    def forward(self, x):
        """Mask the input, encode it, and run all pretraining heads.

        Args:
            x: input volume of shape ``(B, in_channels, D, H, W)``.

        Returns:
            dict with reconstruction, mask-prediction, and contrastive outputs
            plus the mask bookkeeping needed by the loss.
        """
        device = x.device
        patch_size = self.downsample_scale
        mask_feat_size = self.mask_feat_size
        # Random mask ratio; clamp pushes all draws below 0.3 up to 0.3.
        mask_ratio = torch.clamp(torch.rand(1), 0.3, 1.0)

        x_mask, mask = mask_func(x, self.in_channels, mask_ratio, patch_size, mask_feat_size)
        # Number of masked patches per sample (target for pred_mask_region).
        mask_labels = mask.sum(dim=1).to(device)

        # Deepest feature map, shape (B, C, d, h, w) — e.g. b c 4 4 4.
        encode_feature = self.model(x_mask)[-1]
        # Second pass without gradients as the contrastive target.
        # NOTE(review): same input and mode; with all drop rates at 0 this
        # should produce identical features — confirm this is intended.
        with torch.no_grad():
            encode_feature_2 = self.model(x_mask)[-1]

        # Flatten spatial dims to token sequences: (B, d*h*w, C).
        # Computed once and shared by all heads (the three original copies
        # were identical).
        tokens = encode_feature.flatten(start_dim=2, end_dim=4).transpose(1, 2)
        tokens_2 = encode_feature_2.flatten(start_dim=2, end_dim=4).transpose(1, 2)

        # Contrastive projections from token index 1 of each view.
        contrast_pred = self.contrast_learning_head(tokens[:, 1])
        contrast_pred_2 = self.contrast_learning_head(tokens_2[:, 1])

        # Decode the masked volume back to image space.
        x_rec = self.conv(encode_feature)

        # Masked-patch count predicted from token index 0.
        mask_region_pred = self.pred_mask_region(tokens[:, 0])
        # Per-token masked/unmasked logits over the whole sequence.
        mask_region_position_pred = self.pred_mask_region_position(tokens)

        # Fixed-size downsampled copy of the clean input for auxiliary use.
        random_patches = F.interpolate(x, size=(24, 24, 24), mode="trilinear", align_corners=False)
        return {
            "pred": x_rec,
            "pred_mask_region": mask_region_pred,
            "pred_mask_position_region": mask_region_position_pred,
            "mask": mask,
            "x_mask": x_mask,
            "patch_size": patch_size,
            "mask_feat_size": mask_feat_size,
            "random_patches": random_patches,
            "mask_labels": mask_labels,
            "contrast_pred_1": contrast_pred,
            "contrast_pred_2": contrast_pred_2,
        }
