# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

# --------------------------------------------------------
# DPT head for ViTs
# --------------------------------------------------------
# References:
# https://github.com/isl-org/DPT
# https://github.com/EPFL-VILAB/MultiMAE/blob/main/multimae/output_adapters.py

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from typing import Union, Tuple, Iterable, List, Optional

# Normalize a patch-size argument to a 2-tuple.
def pair(t):
    """Return `t` as a 2-tuple: tuples pass through, scalars are duplicated."""
    if isinstance(t, tuple):
        return t
    return (t, t)

 # 改变所有中间层通道为特征通道(256)
def make_scratch(in_shape, out_shape, groups=1, expand=False): # 输入中间层通道List，特征通道(256)
    scratch = nn.Module()

    out_shape1 = out_shape
    out_shape2 = out_shape
    out_shape3 = out_shape
    out_shape4 = out_shape
    if expand == True: # 默认False
        out_shape1 = out_shape
        out_shape2 = out_shape * 2
        out_shape3 = out_shape * 4
        out_shape4 = out_shape * 8

    scratch.layer1_rn = nn.Conv2d( # 2d卷积，输入中间层通道，输出特征通道(256)，尺寸不变
        in_shape[0],
        out_shape1,
        kernel_size=3,
        stride=1,
        padding=1,
        bias=False,
        groups=groups,
    )
    scratch.layer2_rn = nn.Conv2d( # 2d卷积，同上
        in_shape[1],
        out_shape2,
        kernel_size=3,
        stride=1,
        padding=1,
        bias=False,
        groups=groups,
    )
    scratch.layer3_rn = nn.Conv2d( # 2d卷积，同上
        in_shape[2],
        out_shape3,
        kernel_size=3,
        stride=1,
        padding=1,
        bias=False,
        groups=groups,
    )
    scratch.layer4_rn = nn.Conv2d( # 2d卷积，同上
        in_shape[3],
        out_shape4,
        kernel_size=3,
        stride=1,
        padding=1,
        bias=False,
        groups=groups,
    )

    # 将所有中间层处理打包，中间层通道->特征通道[256]*4，尺寸不变
    scratch.layer_rn = nn.ModuleList([
        scratch.layer1_rn,
        scratch.layer2_rn,
        scratch.layer3_rn,
        scratch.layer4_rn,
    ])

    return scratch

 # 自定义残差卷积单元
class ResidualConvUnit_custom(nn.Module):
    """Residual convolution module."""

    def __init__(self, features, activation, bn): # 特征通道(256)，ReLU，批归一化
        """Init.
        Args:
            features (int): number of features
        """
        super().__init__()

        self.bn = bn

        self.groups = 1

        self.conv1 = nn.Conv2d( # 2d卷积，通道不变，尺寸不变
            features,
            features,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=not self.bn,
            groups=self.groups,
        )

        self.conv2 = nn.Conv2d( # 同上
            features,
            features,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=not self.bn,
            groups=self.groups,
        )

        if self.bn == True: # 批归一化
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)

        self.activation = activation # ReLU

        self.skip_add = nn.quantized.FloatFunctional() # 使用量化的方式实现残差连接

    def forward(self, x):
        """Forward pass.
        Args:
            x (tensor): input
        Returns:
            tensor: output
        """

        out = self.activation(x) # ReLU
        out = self.conv1(out) # 2d卷积，通道不变，尺寸不变
        if self.bn == True:
            out = self.bn1(out) # 批归一化

        out = self.activation(out) # 同上
        out = self.conv2(out)
        if self.bn == True:
            out = self.bn2(out)

        if self.groups > 1:
            out = self.conv_merge(out)

        return self.skip_add.add(out, x) # 残差连接

# Fuses a coarser decoder path with a skip feature and upsamples by 2x.
class FeatureFusionBlock_custom(nn.Module):
    """Feature fusion block."""

    def __init__(
        self,
        features,
        activation,
        deconv=False,
        bn=False,
        expand=False,
        align_corners=True,
        width_ratio=1,
    ):
        """Init.
        Args:
            features (int): number of features
            activation (nn.Module): non-linearity for the residual units
            deconv (bool): stored, but unused by this implementation
            bn (bool): batch norm inside the residual units
            expand (bool): halve the channel count in the output conv
            align_corners (bool): forwarded to the 2x bilinear upsampling
            width_ratio (float): target width/height ratio of the output
        """
        super(FeatureFusionBlock_custom, self).__init__()
        self.width_ratio = width_ratio
        self.deconv = deconv
        self.align_corners = align_corners
        self.groups = 1
        self.expand = expand

        out_features = features // 2 if expand else features

        # 1x1 conv: adjusts channels (unchanged unless `expand`), keeps size.
        self.out_conv = nn.Conv2d(
            features,
            out_features,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
            groups=1,
        )

        # One residual unit for the incoming skip feature, one after fusion.
        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)

        # Quantization-friendly elementwise add for the fusion step.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, *xs):
        """Forward pass.
        Args:
            *xs: one or two feature maps; the optional second one is fused in.
        Returns:
            tensor: fused and upsampled feature map
        """
        fused = xs[0]

        if len(xs) == 2:
            skip = self.resConfUnit1(xs[1])
            if self.width_ratio != 1:
                # Match the main path's spatial size before adding.
                skip = F.interpolate(skip, size=(fused.shape[2], fused.shape[3]), mode='bilinear')
            fused = self.skip_add.add(fused, skip)

        fused = self.resConfUnit2(fused)

        if self.width_ratio != 1:
            # Choose a target width approaching the requested width/height
            # ratio while the height is doubled.
            if (fused.shape[3] / fused.shape[2]) < (2 / 3) * self.width_ratio:
                target_w = 3 * fused.shape[3]
            else:
                target_w = int(self.width_ratio * 2 * fused.shape[2])
            fused = F.interpolate(fused, size=(2 * fused.shape[2], target_w), mode='bilinear')
        else:
            # Plain 2x bilinear upsampling.
            fused = nn.functional.interpolate(fused, scale_factor=2, mode="bilinear", align_corners=self.align_corners)

        return self.out_conv(fused)

# Factory for a FeatureFusionBlock_custom with the DPT default settings.
def make_fusion_block(features, use_bn, width_ratio=1):
    """Create a fusion block: ReLU activation, no deconv, no channel
    expansion, align_corners enabled.

    :param features: feature channel count (e.g. 256)
    :param use_bn: enable batch norm inside the residual units
    :param width_ratio: output width/height ratio forwarded to the block
    """
    options = dict(
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
        width_ratio=width_ratio,
    )
    return FeatureFusionBlock_custom(features, nn.ReLU(False), **options)

# Thin nn.Module wrapper around nn.functional.interpolate so it can be used
# inside nn.Sequential.
class Interpolate(nn.Module):
    """Interpolation module."""

    def __init__(self, scale_factor, mode, align_corners=False):
        """Init.
        Args:
            scale_factor (float): scaling
            mode (str): interpolation mode
            align_corners (bool): forwarded to the interpolation call
        """
        super(Interpolate, self).__init__()

        # Keep a handle to the functional op plus its call arguments.
        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Forward pass.
        Args:
            x (tensor): input
        Returns:
            tensor: interpolated data
        """
        return self.interp(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
        )

# Dense-prediction (DPT) output adapter: turns hooked ViT token sequences
# into a dense per-pixel prediction map.
class DPTOutputAdapter(nn.Module):
    """DPT output adapter.

    :param num_channels: Number of output channels
    :param stride_level: stride level compared to the full-sized image.
        E.g. 4 for 1/4th the size of the image.
    :param patch_size: Int or tuple of the patch size over the full image size.
        Patch size for smaller inputs will be computed accordingly.
    :param main_tasks: Names of the tasks whose tokens feed this adapter
    :param hooks: Index of intermediate layers
    :param layer_dims: Dimension of intermediate layers
    :param feature_dim: Feature dimension
    :param use_bn: If set to True, activates batch norm
    :param dim_tokens_enc:  Dimension of tokens coming from encoder
    :param head_type: 'regression' or 'semseg' output head
    :param output_width_ratio: width/height ratio forwarded to fusion blocks
    """

    def __init__(self,
                 num_channels: int = 1, # number of output channels
                 stride_level: int = 1, # stride level w.r.t. the full image
                 patch_size: Union[int, Tuple[int, int]] = 16, # patch size; int or (H, W) tuple
                 main_tasks: Iterable[str] = ('rgb',),
                 hooks: List[int] = [2, 5, 8, 11], # indices of the 4 hooked intermediate layers (NOTE: mutable default, shared across instances)
                 layer_dims: List[int] = [96, 192, 384, 768], # per-layer channel dims after post-processing (NOTE: mutable default)
                 feature_dim: int = 256, # common feature dim all layers are projected to
                 use_bn: bool = False, # batch norm in the fusion blocks
                 dim_tokens_enc: Optional[int] = None, # encoder embedding dimension
                 head_type: str = 'regression', # output head type: 'regression' or 'semseg'
                 output_width_ratio=1, # output width/height ratio for fusion blocks
                 **kwargs):
        super().__init__()
        self.num_channels = num_channels
        self.stride_level = stride_level
        self.patch_size = pair(patch_size) # normalized to an (H, W) tuple
        self.main_tasks = main_tasks
        self.hooks = hooks
        self.layer_dims = layer_dims
        self.feature_dim = feature_dim
        self.dim_tokens_enc = dim_tokens_enc * len(self.main_tasks) if dim_tokens_enc is not None else None # token dim scales with the number of tasks (default 1)
        self.head_type = head_type

        # Actual patch height and width, taking into account stride of input
        self.P_H = max(1, self.patch_size[0] // stride_level)
        self.P_W = max(1, self.patch_size[1] // stride_level)

        self.scratch = make_scratch(layer_dims, feature_dim, groups=1, expand=False) # projects each hooked layer's channels (layer_dims) to feature_dim

        # Fusion blocks: progressively merge and upsample the four paths.
        self.scratch.refinenet1 = make_fusion_block(feature_dim, use_bn, output_width_ratio)
        self.scratch.refinenet2 = make_fusion_block(feature_dim, use_bn, output_width_ratio)
        self.scratch.refinenet3 = make_fusion_block(feature_dim, use_bn, output_width_ratio)
        self.scratch.refinenet4 = make_fusion_block(feature_dim, use_bn, output_width_ratio)

        if self.head_type == 'regression': # regression output head
            # The "DPTDepthModel" head
            self.head = nn.Sequential(
                nn.Conv2d(feature_dim, feature_dim // 2, kernel_size=3, stride=1, padding=1), # feature_dim -> feature_dim//2 channels, same spatial size
                Interpolate(scale_factor=2, mode="bilinear", align_corners=True), # 2x bilinear upsampling
                nn.Conv2d(feature_dim // 2, 32, kernel_size=3, stride=1, padding=1), # feature_dim//2 -> 32 channels, same spatial size
                nn.ReLU(True), # in-place ReLU
                nn.Conv2d(32, self.num_channels, kernel_size=1, stride=1, padding=0) # 32 -> num_channels, same spatial size
            )
        elif self.head_type == 'semseg': # semantic-segmentation output head
            # The "DPTSegmentationModel" head
            self.head = nn.Sequential(
                nn.Conv2d(feature_dim, feature_dim, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(feature_dim) if use_bn else nn.Identity(),
                nn.ReLU(True),
                nn.Dropout(0.1, False),
                nn.Conv2d(feature_dim, self.num_channels, kernel_size=1),
                Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            )
        else:
            raise ValueError('DPT head_type must be "regression" or "semseg".')

        if self.dim_tokens_enc is not None:
            self.init(dim_tokens_enc=dim_tokens_enc) # build token post-processing layers now that the dim is known

    # Build the encoder-dim-dependent layers.
    def init(self, dim_tokens_enc=768):
        """
        Initialize parts of decoder that are dependent on dimension of encoder tokens.
        Should be called when setting up MultiMAE.

        :param dim_tokens_enc: Dimension of tokens coming from encoder
        """
        #print(dim_tokens_enc)

        # Set up activation postprocessing layers
        if isinstance(dim_tokens_enc, int): # an int means the same dim for all four hooked layers
            dim_tokens_enc = 4 * [dim_tokens_enc]

        self.dim_tokens_enc = [dt * len(self.main_tasks) for dt in dim_tokens_enc] # each entry scales with the number of tasks (default 1)

        # Post-process hooked layer 0: project channels, upsample spatial size x4.
        self.act_1_postprocess = nn.Sequential(
            nn.Conv2d( # 1x1 conv: adjust channels, keep spatial size
                in_channels=self.dim_tokens_enc[0],
                out_channels=self.layer_dims[0],
                kernel_size=1, stride=1, padding=0,
            ),
            nn.ConvTranspose2d( # transposed conv: keep channels, spatial size x4
                in_channels=self.layer_dims[0],
                out_channels=self.layer_dims[0],
                kernel_size=4, stride=4, padding=0,
                bias=True, dilation=1, groups=1,
            )
        )

        # Post-process hooked layer 1: project channels, upsample spatial size x2.
        self.act_2_postprocess = nn.Sequential(
            nn.Conv2d( # 1x1 conv: adjust channels, keep spatial size
                in_channels=self.dim_tokens_enc[1],
                out_channels=self.layer_dims[1],
                kernel_size=1, stride=1, padding=0,
            ),
            nn.ConvTranspose2d( # transposed conv: keep channels, spatial size x2
                in_channels=self.layer_dims[1],
                out_channels=self.layer_dims[1],
                kernel_size=2, stride=2, padding=0,
                bias=True, dilation=1, groups=1,
            )
        )

        # Post-process hooked layer 2: project channels, keep spatial size.
        self.act_3_postprocess = nn.Sequential(
            nn.Conv2d( # 1x1 conv: adjust channels, keep spatial size
                in_channels=self.dim_tokens_enc[2],
                out_channels=self.layer_dims[2],
                kernel_size=1, stride=1, padding=0,
            )
        )

        # Post-process hooked layer 3: project channels, downsample spatial size /2.
        self.act_4_postprocess = nn.Sequential(
            nn.Conv2d( # 1x1 conv: adjust channels, keep spatial size
                in_channels=self.dim_tokens_enc[3],
                out_channels=self.layer_dims[3],
                kernel_size=1, stride=1, padding=0,
            ),
            nn.Conv2d( # strided 3x3 conv: keep channels, spatial size /2
                in_channels=self.layer_dims[3],
                out_channels=self.layer_dims[3],
                kernel_size=3, stride=2, padding=1,
            )
        )

        # Bundle the four post-processing stages; channel dims become
        # layer_dims and spatial sizes become x4, x2, x1, x0.5 respectively.
        self.act_postprocess = nn.ModuleList([
            self.act_1_postprocess,
            self.act_2_postprocess,
            self.act_3_postprocess,
            self.act_4_postprocess
        ])

    def adapt_tokens(self, encoder_tokens):
        # Adapt tokens. As written this selects every token and concatenates a
        # single slice, i.e. it is effectively a pass-through copy; presumably
        # a hook point for dropping global tokens in other configurations.
        x = []
        x.append(encoder_tokens[:, :])
        x = torch.cat(x, dim=-1)
        return x

    def forward(self, encoder_tokens: List[torch.Tensor], image_size): # list of per-block token tensors, target image size
            #input_info: Dict):
        assert self.dim_tokens_enc is not None, 'Need to call init(dim_tokens_enc) function first'
        H, W = image_size # target prediction image size

        # Number of patches in height and width
        N_H = H // (self.stride_level * self.P_H)
        N_W = W // (self.stride_level * self.P_W)

        # Hook decoder onto 4 layers from specified ViT layers
        layers = [encoder_tokens[hook] for hook in self.hooks]

        # Extract only task-relevant tokens and ignore global tokens.
        layers = [self.adapt_tokens(l) for l in layers] # each: (batch, num_tokens, token_dim) — assumed; confirm against encoder output

        # Reshape tokens to spatial representation
        layers = [rearrange(l, 'b (nh nw) c -> b c nh nw', nh=N_H, nw=N_W) for l in layers] # token sequence -> (batch, channels, N_H, N_W) feature map

        layers = [self.act_postprocess[idx](l) for idx, l in enumerate(layers)] # per-layer post-processing: channels -> layer_dims, sizes x4/x2/x1/x0.5
        # Project layers to chosen feature dim
        layers = [self.scratch.layer_rn[idx](l) for idx, l in enumerate(layers)] # layer_dims -> feature_dim channels, spatial size unchanged

        # Fuse layers using refinement stages
        path_4 = self.scratch.refinenet4(layers[3]) # deepest layer: upsample only
        path_3 = self.scratch.refinenet3(path_4, layers[2]) # fuse with skip feature, upsample
        path_2 = self.scratch.refinenet2(path_3, layers[1])
        path_1 = self.scratch.refinenet1(path_2, layers[0])

        # Output head
        out = self.head(path_1) # regression head by default: num_channels outputs, 2x upsampled

        return out
