from typing import Type, Tuple, List

import torch
from torch import nn

from algorithms.common import LayerNorm2d
from torch.nn import functional as F


class MaskDecoder(nn.Module):
    def __init__(
            self,
            *,
            transformer_dim: int,
            transformer: nn.Module,
            num_multi_mask_outputs: int = 3,
            activation: Type[nn.Module] = nn.GELU,
            iou_head_depth: int = 3,
            iou_head_hidden_dim: int = 256,
    ) -> None:
        """
        Predicts masks given image and prompt embeddings, using a transformer architecture.

        :param transformer_dim (int): the channel dimension of the transformer
        :param transformer (nn.Module): the transformer used to predict masks
        :param num_multi_mask_outputs (int): the number of masks to predict when disambiguating masks
        :param activation (nn.Module): the type of activation to use when upscaling masks
        :param iou_head_depth (int): the depth of the MLP used to predict mask quality
        :param iou_head_hidden_dim (int): the hidden dimension of the MLP used to predict mask quality
        """
        super().__init__()
        self.transformer_dim = transformer_dim
        self.transformer = transformer
        self.num_multi_mask_outputs = num_multi_mask_outputs
        self.num_mask_tokens = num_multi_mask_outputs + 1  # +1 single-mask output; 4 with the default of 3
        # One learned token per candidate mask: each token's transformer output is
        # decoded (via a hypernetwork MLP below) into one candidate segmentation.
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
        # A single learned token whose transformer output predicts the IoU between each
        # candidate mask and the ground truth, used to score/rank the candidates.
        self.iou_token = nn.Embedding(1, transformer_dim)

        # Transposed convolutions progressively enlarge the feature map while reducing
        # its channel count, producing higher-resolution mask features (4x upsampling total).
        self.output_upscaling = nn.Sequential(
            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
            LayerNorm2d(transformer_dim // 4),
            activation(),
            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
            activation(),
        )
        # One small MLP per mask token; its output is used as per-mask convolution-like
        # weights over the upscaled embedding (hypernetwork-style decoding).
        self.output_hypernetworks_mlps = nn.ModuleList(
            [
                MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
                for i in range(self.num_mask_tokens)
            ]
        )
        # Maps the IoU token's output to one quality score per mask token.
        self.iou_prediction_head = MLP(
            transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
        )

    def forward(
            self,
            image_embeddings: torch.Tensor,
            image_pe: torch.Tensor,
            sparse_prompt_embeddings: torch.Tensor,
            dense_prompt_embeddings: torch.Tensor,
            multi_mask_output: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks and their quality scores from image and prompt embeddings.

        :param image_embeddings (torch.Tensor): embeddings from the image encoder
        :param image_pe (torch.Tensor): positional encoding from the prompt encoder
        :param sparse_prompt_embeddings (torch.Tensor): point/box embeddings from the prompt encoder
        :param dense_prompt_embeddings (torch.Tensor): mask embeddings from the prompt encoder
        :param multi_mask_output (bool): whether to return multiple disambiguating masks
            (skips mask 0) or only the single-mask output (mask 0)
        :return: the selected masks and their predicted IoU scores
        """
        masks, iou_pred = self.predict_masks(
            image_embeddings=image_embeddings,
            image_pe=image_pe,
            sparse_prompt_embeddings=sparse_prompt_embeddings,
            dense_prompt_embeddings=dense_prompt_embeddings,
        )
        # Mask 0 is the single-mask output; masks 1..num_multi_mask_outputs are the
        # multi-mask (disambiguation) outputs. Select one group or the other.
        if multi_mask_output:
            mask_slice = slice(1, None)
        else:
            mask_slice = slice(0, 1)
        masks = masks[:, mask_slice, :, :]
        iou_pred = iou_pred[:, mask_slice]

        # Prepare output
        return masks, iou_pred

    def predict_masks(
            self,
            image_embeddings: torch.Tensor,
            image_pe: torch.Tensor,
            sparse_prompt_embeddings: torch.Tensor,
            dense_prompt_embeddings: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predicts masks. See 'forward' for the meaning of the arguments."""
        # iou_token weight is (1, C); mask_tokens weight is (num_mask_tokens, C),
        # e.g. (4, C) — so output_tokens is (1+4, C) with the IoU token first.
        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
        # unsqueeze(0) adds a batch dimension -> (1, 1+4, C); expand -> (B, 1+4, C)
        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
        # sparse_prompt_embeddings is (B, N, C) (e.g. N=3) -> tokens is (B, 1+4+N, C)
        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
        # image_embeddings has batch dim 1; repeat it to match the token batch size B
        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
        # dense_prompt_embeddings is the (previous) mask prompt, added element-wise
        src = src + dense_prompt_embeddings
        # image_pe comes from the prompt encoder's PositionEmbeddingRandom, batch dim 1
        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
        b, c, h, w = src.shape
        # Run the transformer; hs are the token outputs (queries), src the image outputs (keys)
        hs, src = self.transformer(src, pos_src, tokens)
        # Indexing follows the concatenation order above: IoU token first, then mask tokens
        iou_token_out = hs[:, 0, :]
        mask_tokens_out = hs[:, 1:(1 + self.num_mask_tokens), :]
        # Restore the spatial layout of the post-transformer image embedding and upscale it
        src = src.transpose(1, 2).view(b, c, h, w)
        up_scaled_embedding = self.output_upscaling(src)
        hyper_in_list: List[torch.Tensor] = []
        # Run each mask token through its own hypernetwork MLP, then stack along dim 1
        for i in range(self.num_mask_tokens):
            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
        hyper_in = torch.stack(hyper_in_list, dim=1)
        b, c, h, w = up_scaled_embedding.shape
        # Batched matmul of (B, num_mask_tokens, c) with (B, c, h*w) -> per-token spatial masks
        masks = (hyper_in @ up_scaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
        iou_pred = self.iou_prediction_head(iou_token_out)
        return masks, iou_pred


class MLP(nn.Module):
    """Simple multi-layer perceptron (FFN) with ReLU between layers.

    Used by the mask decoder both as the hypernetwork heads and as the
    IoU prediction head.
    """

    def __init__(
            self,
            input_dim: int,
            hidden_dim: int,
            output_dim: int,
            num_layers: int,
            sigmoid_output: bool = False,
    ) -> None:
        """
        :param input_dim (int): size of each input sample
        :param hidden_dim (int): size of every hidden layer
        :param output_dim (int): size of each output sample
        :param num_layers (int): total number of linear layers (>= 1)
        :param sigmoid_output (bool): if True, apply a sigmoid to the final output
        """
        super().__init__()
        self.num_layers = num_layers
        # Example: hidden_dim=128, num_layers=3 -> h=[128, 128]; with input_dim=64,
        # output_dim=10 the zip yields layer sizes (64,128), (128,128), (128,10).
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.sigmoid_output = sigmoid_output

    def forward(self, x):
        # ReLU after every layer except the last (the head stays linear).
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        if self.sigmoid_output:
            # torch.sigmoid replaces the deprecated F.sigmoid (same numerics,
            # no DeprecationWarning).
            x = torch.sigmoid(x)
        return x
