import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.networks.nets.swin_unetr import SwinTransformer, SwinUNETR
from monai.networks.nets.vit import ViT
from .utils import get_region_nums, get_mask_labels, mask_func, get_random_patch_new, get_random_patch

class LocalRecHead(nn.Module):
    """Masked-reconstruction pretraining head over a 3D transformer backbone.

    The input volume is masked patch-wise, encoded, and several
    self-supervised objectives are produced from the deepest feature map:

    * voxel reconstruction of the masked volume (``pred``),
    * prediction of the masked-patch count / per-token mask position
      (``pred_mask_region`` / ``pred_mask_position_region``),
    * a contrastive embedding from two encoder passes over the same masked
      input, the second pass gradient-free
      (``contrast_pred_1`` / ``contrast_pred_2``).
    """

    def __init__(
            self,
            model_name,
            image_size,
            downsample_scale,
            mask_scale,
            in_channels: int = 1,
            bottom_encode_size=128,
            mask_ratio=0.5,
            learned_mask_tokens=True,

    ):
        """Build the backbone, mask bookkeeping, and prediction heads.

        Args:
            model_name: backbone selector (``"swintransformer"`` or ``"vit"``;
                see the NOTE below — both currently map to SwinTransformer).
            image_size: spatial size (H, W, D) of the input volume.
            downsample_scale: total spatial downsampling factor of the
                backbone, per axis.
            mask_scale: edge lengths (h, w, d) of one mask patch.
            in_channels: number of channels of the input volume.
            bottom_encode_size: channel width of the deepest encoder feature
                map consumed by the heads.
            mask_ratio: stored but currently unused — ``forward`` samples a
                fresh uniform ratio on every call.
            learned_mask_tokens: if True, the mask token is a trainable
                parameter; otherwise it is a fixed zero tensor.

        Raises:
            ValueError: if ``model_name`` is not a recognized backbone name.
        """
        super().__init__()

        self.in_channels = in_channels
        self.mask_ratio = mask_ratio  # NOTE: unused; forward() draws a random ratio per call
        self.image_size = image_size
        self.downsample_scale = downsample_scale
        self.mask_scale = mask_scale
        self.learned_mask_tokens = learned_mask_tokens

        # BUG (copy/paste): the original "vit" branch constructed a
        # SwinTransformer identical to the "swintransformer" branch, so the
        # ViT import was never used.  Both names are kept mapping to
        # SwinTransformer to preserve behavior (forward() relies on the
        # Swin-style list-of-feature-maps output); unknown names now fail
        # fast instead of hitting a missing-``self.model`` AttributeError
        # later in forward().
        if model_name in ("swintransformer", "vit"):
            self.model = SwinTransformer(
                in_chans=in_channels,
                embed_dim=48,
                window_size=(7, 7, 7),
                patch_size=(2, 2, 2),
                depths=[2, 2, 2, 2],
                num_heads=[3, 6, 12, 24],
                mlp_ratio=4.0,
                qkv_bias=True,
                drop_rate=0.,
                attn_drop_rate=0.,
                drop_path_rate=0.,
                norm_layer=torch.nn.LayerNorm,
                use_checkpoint=False,
                spatial_dims=3,
            )
        else:
            raise ValueError(f"unsupported model_name: {model_name!r}")

        # Spatial size of the mask grid: one cell per mask patch.
        self.mask_feat_size = [image_size[0] // mask_scale[0],
                               image_size[1] // mask_scale[1],
                               image_size[2] // mask_scale[2]]

        token_dim = in_channels * mask_scale[0] * mask_scale[1] * mask_scale[2]
        if learned_mask_tokens:
            self.mask_tokens = nn.Parameter(torch.zeros(1, 1, token_dim))
        else:
            # Registered as a non-persistent buffer so it follows
            # .to(device)/.cuda(); a bare tensor attribute would be left on
            # CPU.  persistent=False keeps the state_dict layout unchanged.
            self.register_buffer(
                "mask_tokens", torch.zeros(1, 1, token_dim), persistent=False
            )

        # Spatial size of the deepest backbone feature map.
        bottom_size = [image_size[0] // downsample_scale[0],
                       image_size[1] // downsample_scale[1],
                       image_size[2] // downsample_scale[2]]

        # Total number of mask patches over the whole volume.
        self.patches_of_region = (image_size[0] // mask_scale[0]
                                  * image_size[1] // mask_scale[1]
                                  * image_size[2] // mask_scale[2])
        print(f"patches_of_region is {self.patches_of_region}")
        print(f"deep feature size is {bottom_size}")

        # +1 output class — presumably the "no patch masked" case; confirm
        # against the loss that consumes pred_mask_region.
        self.pred_mask_region = nn.Linear(bottom_encode_size, self.patches_of_region + 1)
        # Binary masked/unmasked prediction per token.
        self.pred_mask_region_position = nn.Linear(bottom_encode_size, 2)

        self.contrast_learning_head = nn.Linear(bottom_encode_size, 512)

        # Reconstruction decoder: three (conv + InstanceNorm + LeakyReLU +
        # trilinear 2x upsample) stages followed by a 1x1 projection back to
        # the image channel count — 8x spatial upsampling overall.
        dim = bottom_encode_size
        self.conv = nn.Sequential(
            nn.Conv3d(dim, dim // 2, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 2),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            nn.Conv3d(dim // 2, dim // 4, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 4),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            nn.Conv3d(dim // 4, dim // 8, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 8),
            nn.LeakyReLU(),
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False),
            nn.Conv3d(dim // 8, dim // 16, kernel_size=3, stride=1, padding=1),
            nn.InstanceNorm3d(dim // 16),
            nn.LeakyReLU(),
            nn.Conv3d(dim // 16, in_channels, kernel_size=1, stride=1, padding=0),
        )

    def get_feature_patch(self, features, patch_index):
        """Gather one spatial position per sample from a 5D feature map.

        Args:
            features: tensor of shape ``(b, c, h, w, d)``.
            patch_index: per-sample flat spatial index with ``b`` elements
                (any shape reshapeable to ``(b, 1)``).

        Returns:
            Tensor of shape ``(b, c, 1)`` holding the selected feature
            vector for each sample.
        """
        batch, dim = features.shape[0], features.shape[1]
        flat = features.flatten(start_dim=2, end_dim=-1)    # (b, c, h*w*d)
        idx = patch_index.reshape(shape=(batch, 1))         # (b, 1)
        idx = idx.repeat(1, dim).unsqueeze(dim=-1)          # (b, c, 1)
        return torch.gather(flat, dim=2, index=idx)

    def forward(self, x):
        """Mask ``x``, encode it, and compute all pretraining outputs.

        Args:
            x: input volume of shape ``(b, in_channels, H, W, D)``.

        Returns:
            Dict with the reconstruction, mask-prediction and contrastive
            outputs plus the mask bookkeeping the losses need.
        """
        device = x.device
        patch_size = self.mask_scale
        mask_feat_size = self.mask_feat_size
        # Fresh uniform mask ratio each call (self.mask_ratio is ignored);
        # torch.rand already lies in [0, 1) so no clamping is needed.
        mask_ratio = torch.rand(1)

        x_mask, mask = mask_func(x, self.in_channels, mask_ratio, patch_size, mask_feat_size)
        # Presumably the number of masked patches per sample — mask_func is
        # defined in .utils and not visible here; verify its mask layout.
        mask_labels = mask.sum(dim=1).to(device)

        # Two encoder passes over the same masked input; the second is
        # gradient-free and serves as the contrastive target.
        encode_feature = self.model(x_mask)[-1]  # deepest map, e.g. (b, c, 4, 4, 4)
        with torch.no_grad():
            encode_feature_2 = self.model(x_mask)[-1]

        # (b, c, h'*w'*d') -> (b, n_tokens, c)
        tokens = encode_feature.flatten(start_dim=2, end_dim=4).transpose(1, 2)
        tokens_2 = encode_feature_2.flatten(start_dim=2, end_dim=4).transpose(1, 2)

        # Contrastive embedding from token 1 of each view.
        # NOTE(review): the mask-prediction head below uses token 0 — confirm
        # the 1-vs-0 index mismatch is intentional.
        contrast_pred = self.contrast_learning_head(tokens[:, 1])
        contrast_pred_2 = self.contrast_learning_head(tokens_2[:, 1])

        # Voxel reconstruction of the masked volume.
        x_rec = self.conv(encode_feature)

        mask_region_pred = self.pred_mask_region(tokens[:, 0])
        mask_region_position_pred = self.pred_mask_region_position(tokens)

        # Fixed-size thumbnail of the clean input for auxiliary supervision.
        random_patches = F.interpolate(x, size=(32, 32, 32), mode="trilinear", align_corners=False)
        return_data = {
            "pred": x_rec,
            "pred_mask_region": mask_region_pred,
            "pred_mask_position_region": mask_region_position_pred,
            "mask": mask,
            "x_mask": x_mask,
            "patch_size": patch_size,
            "mask_feat_size": mask_feat_size,
            "random_patches": random_patches,
            "mask_labels": mask_labels,
            "contrast_pred_1": contrast_pred,
            "contrast_pred_2": contrast_pred_2,
        }
        return return_data
